Move Codec2-related code from hardware/google/av
Test: None
Bug: 112362730
Change-Id: Ie2f8ff431d65c40333f267ab9877d47089adeea4
diff --git a/media/codec2/sfplugin/Android.bp b/media/codec2/sfplugin/Android.bp
new file mode 100644
index 0000000..831fb35
--- /dev/null
+++ b/media/codec2/sfplugin/Android.bp
@@ -0,0 +1,61 @@
+// Builds libstagefright_ccodec: the stagefright-side plugin that bridges
+// Codec 2.0 components into the MediaCodec (CodecBase) interface.
+cc_library_shared {
+ name: "libstagefright_ccodec",
+
+ srcs: [
+ "C2OMXNode.cpp",
+ "CCodec.cpp",
+ "CCodecBufferChannel.cpp",
+ "CCodecConfig.cpp",
+ "Codec2Buffer.cpp",
+ "Codec2InfoBuilder.cpp",
+ "ReflectedParamUpdater.cpp",
+ "SkipCutBuffer.cpp",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+
+ header_libs: [
+ "libstagefright_codec2_internal",
+ ],
+
+ shared_libs: [
+ "android.hardware.cas.native@1.0",
+ "android.hardware.graphics.bufferqueue@1.0",
+ "android.hardware.media.omx@1.0",
+ "hardware.google.media.c2@1.0",
+ "libbase",
+ "libbinder",
+ "libcodec2_hidl_client",
+ "libcutils",
+ "libgui",
+ "libhidlallocatorutils",
+ "libhidlbase",
+ "liblog",
+ "libmedia",
+ "libmedia_omx",
+ "libstagefright_bufferqueue_helper",
+ "libstagefright_ccodec_utils",
+ "libstagefright_codec2",
+ "libstagefright_codec2_vndk",
+ "libstagefright_codecbase",
+ "libstagefright_foundation",
+ "libstagefright_omx_utils",
+ "libstagefright_xmlparser",
+ "libui",
+ "libutils",
+ ],
+
+ // Hardened build: CFI plus integer-overflow sanitizers, with CFI
+ // diagnostics enabled so violations are reported rather than silent.
+ sanitize: {
+ cfi: true,
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ diag: {
+ cfi: true,
+ },
+ },
+}
diff --git a/media/codec2/sfplugin/C2OMXNode.cpp b/media/codec2/sfplugin/C2OMXNode.cpp
new file mode 100644
index 0000000..749fd7a
--- /dev/null
+++ b/media/codec2/sfplugin/C2OMXNode.cpp
@@ -0,0 +1,329 @@
+/*
+ * Copyright 2018, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef __LP64__
+#define OMX_ANDROID_COMPILE_AS_32BIT_ON_64BIT_PLATFORMS
+#endif
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "C2OMXNode"
+#include <log/log.h>
+
+#include <C2AllocatorGralloc.h>
+#include <C2BlockInternal.h>
+#include <C2Component.h>
+#include <C2PlatformSupport.h>
+
+#include <OMX_Component.h>
+#include <OMX_Index.h>
+#include <OMX_IndexExt.h>
+
+#include <media/stagefright/omx/OMXUtils.h>
+#include <media/stagefright/MediaErrors.h>
+#include <ui/Fence.h>
+#include <ui/GraphicBuffer.h>
+
+#include "C2OMXNode.h"
+
+namespace android {
+
+namespace {
+
+// Minimal C2Buffer subclass wrapping a single graphic (2D) block.
+// NOTE(review): presumably needed because C2Buffer cannot be constructed
+// directly — confirm against C2Buffer's declaration.
+class Buffer2D : public C2Buffer {
+public:
+ explicit Buffer2D(C2ConstGraphicBlock block) : C2Buffer({ block }) {}
+};
+
+} // namespace
+
+// Wraps a Codec2 component (held weakly) so it can be handed to
+// IGraphicBufferSource::configure as an IOMXNode.
+C2OMXNode::C2OMXNode(const std::shared_ptr<Codec2Client::Component> &comp)
+ : mComp(comp), mFrameIndex(0), mWidth(0), mHeight(0),
+ mAdjustTimestampGapUs(0), mFirstInputFrame(true) {
+ // TODO: read from intf()
+ // Software components ("c2.android." prefix) read input with the CPU;
+ // anything else is treated as a HW video encoder for gralloc usage bits.
+ if (!strncmp(comp->getName().c_str(), "c2.android.", 11)) {
+ mUsage = GRALLOC_USAGE_SW_READ_OFTEN;
+ } else {
+ mUsage = GRALLOC_USAGE_HW_VIDEO_ENCODER;
+ }
+}
+
+// Detaches this node from the component. mComp is a weak reference, so this
+// only drops the node's link; the component is owned and freed elsewhere.
+status_t C2OMXNode::freeNode() {
+ mComp.reset();
+ return OK;
+}
+
+// No OMX command is actually executed (always reports ERROR_UNSUPPORTED);
+// the state-set-to-Loaded transition is only observed to reset per-session
+// state for node recycling.
+status_t C2OMXNode::sendCommand(OMX_COMMANDTYPE cmd, OMX_S32 param) {
+ if (cmd == OMX_CommandStateSet && param == OMX_StateLoaded) {
+ // Reset first input frame so if C2OMXNode is recycled, the timestamp does not become
+ // negative. This is a workaround for HW codecs that do not handle timestamp rollover.
+ mFirstInputFrame = true;
+ }
+ return ERROR_UNSUPPORTED;
+}
+
+// Answers only the two queries GraphicBufferSource issues: consumer usage
+// bits and the video port definition. All other indices are unsupported.
+status_t C2OMXNode::getParameter(OMX_INDEXTYPE index, void *params, size_t size) {
+ status_t err = ERROR_UNSUPPORTED;
+ switch ((uint32_t)index) {
+ case OMX_IndexParamConsumerUsageBits: {
+ // NOTE(review): no size check here (unlike the port-definition case
+ // below) — callers must pass at least sizeof(OMX_U32).
+ OMX_U32 *usage = (OMX_U32 *)params;
+ *usage = mUsage;
+ err = OK;
+ break;
+ }
+ case OMX_IndexParamPortDefinition: {
+ if (size < sizeof(OMX_PARAM_PORTDEFINITIONTYPE)) {
+ return BAD_VALUE;
+ }
+ OMX_PARAM_PORTDEFINITIONTYPE *pDef = (OMX_PARAM_PORTDEFINITIONTYPE *)params;
+ // TODO: read these from intf()
+ // Hard-coded buffer count; frame size comes from setFrameSize().
+ pDef->nBufferCountActual = 16;
+ pDef->eDomain = OMX_PortDomainVideo;
+ pDef->format.video.nFrameWidth = mWidth;
+ pDef->format.video.nFrameHeight = mHeight;
+ err = OK;
+ break;
+ }
+ default:
+ break;
+ }
+ return err;
+}
+
+// Accepts only the max/fixed-frame-duration control used for timestamp-gap
+// adjustment in emptyBuffer(); everything else is unsupported.
+status_t C2OMXNode::setParameter(OMX_INDEXTYPE index, const void *params, size_t size) {
+ // handle max/fixed frame duration control
+ if (index == (OMX_INDEXTYPE)OMX_IndexParamMaxFrameDurationForBitrateControl
+ && params != NULL
+ && size == sizeof(OMX_PARAM_U32TYPE)) {
+ // The incoming number is an int32_t contained in OMX_U32.
+ // (>0: max gap; <0: negated fixed gap; 0: no adjustment.)
+ mAdjustTimestampGapUs = (int32_t)((OMX_PARAM_U32TYPE*)params)->nU32;
+ return OK;
+ }
+ return ERROR_UNSUPPORTED;
+}
+
+// Not supported by this node; stub required by the IOMXNode interface.
+status_t C2OMXNode::getConfig(OMX_INDEXTYPE index, void *config, size_t size) {
+ (void)index;
+ (void)config;
+ (void)size;
+ return ERROR_UNSUPPORTED;
+}
+
+// Not supported by this node; stub required by the IOMXNode interface.
+status_t C2OMXNode::setConfig(OMX_INDEXTYPE index, const void *config, size_t size) {
+ (void)index;
+ (void)config;
+ (void)size;
+ return ERROR_UNSUPPORTED;
+}
+
+// Not supported by this node; stub required by the IOMXNode interface.
+status_t C2OMXNode::setPortMode(OMX_U32 portIndex, IOMX::PortMode mode) {
+ (void)portIndex;
+ (void)mode;
+ return ERROR_UNSUPPORTED;
+}
+
+// Not supported by this node; stub required by the IOMXNode interface.
+status_t C2OMXNode::prepareForAdaptivePlayback(
+ OMX_U32 portIndex, OMX_BOOL enable,
+ OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight) {
+ (void)portIndex;
+ (void)enable;
+ (void)maxFrameWidth;
+ (void)maxFrameHeight;
+ return ERROR_UNSUPPORTED;
+}
+
+// Not supported; always clears the out-param sideband handle.
+status_t C2OMXNode::configureVideoTunnelMode(
+ OMX_U32 portIndex, OMX_BOOL tunneled,
+ OMX_U32 audioHwSync, native_handle_t **sidebandHandle) {
+ (void)portIndex;
+ (void)tunneled;
+ (void)audioHwSync;
+ *sidebandHandle = nullptr;
+ return ERROR_UNSUPPORTED;
+}
+
+// Not supported; reports zero usage bits.
+status_t C2OMXNode::getGraphicBufferUsage(OMX_U32 portIndex, OMX_U32* usage) {
+ (void)portIndex;
+ *usage = 0;
+ return ERROR_UNSUPPORTED;
+}
+
+// Stores the buffer source and fetches the platform gralloc allocator that
+// emptyBuffer() uses to wrap incoming ANW buffers.
+status_t C2OMXNode::setInputSurface(const sp<IOMXBufferSource> &bufferSource) {
+ c2_status_t err = GetCodec2PlatformAllocatorStore()->fetchAllocator(
+ C2PlatformAllocatorStore::GRALLOC,
+ &mAllocator);
+ // NOTE(review): comparing a c2_status_t to OK (a status_t) — correct only
+ // because C2_OK == OK == 0; consider comparing against C2_OK.
+ if (err != OK) {
+ return UNKNOWN_ERROR;
+ }
+ mBufferSource = bufferSource;
+ return OK;
+}
+
+// Not supported; clears the out-params so callers never see stale values.
+status_t C2OMXNode::allocateSecureBuffer(
+ OMX_U32 portIndex, size_t size, buffer_id *buffer,
+ void **bufferData, sp<NativeHandle> *nativeHandle) {
+ (void)portIndex;
+ (void)size;
+ (void)nativeHandle;
+ *buffer = 0;
+ *bufferData = nullptr;
+ return ERROR_UNSUPPORTED;
+}
+
+// Not supported; clears the out-param buffer id.
+status_t C2OMXNode::useBuffer(
+ OMX_U32 portIndex, const OMXBuffer &omxBuf, buffer_id *buffer) {
+ (void)portIndex;
+ (void)omxBuf;
+ *buffer = 0;
+ return ERROR_UNSUPPORTED;
+}
+
+// Not supported by this node; stub required by the IOMXNode interface.
+status_t C2OMXNode::freeBuffer(OMX_U32 portIndex, buffer_id buffer) {
+ (void)portIndex;
+ (void)buffer;
+ return ERROR_UNSUPPORTED;
+}
+
+// Not supported — this node is input-only (IGraphicBufferSource usage).
+status_t C2OMXNode::fillBuffer(
+ buffer_id buffer, const OMXBuffer &omxBuf, int fenceFd) {
+ (void)buffer;
+ (void)omxBuf;
+ (void)fenceFd;
+ return ERROR_UNSUPPORTED;
+}
+
+// Converts an incoming graphic buffer (from GraphicBufferSource) into a
+// C2Work item and queues it on the wrapped Codec2 component. Accepts either
+// an ANW buffer or an empty EOS-only frame; anything else is BAD_VALUE.
+// The buffer is returned to the source via onInputBufferEmptied once the
+// wrapping C2Buffer is destroyed.
+status_t C2OMXNode::emptyBuffer(
+ buffer_id buffer, const OMXBuffer &omxBuf,
+ OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
+ // TODO: better fence handling
+ // For now, block until the acquire fence signals before queueing.
+ if (fenceFd >= 0) {
+ sp<Fence> fence = new Fence(fenceFd);
+ fence->waitForever(LOG_TAG);
+ }
+ std::shared_ptr<Codec2Client::Component> comp = mComp.lock();
+ if (!comp) {
+ return NO_INIT;
+ }
+
+ uint32_t c2Flags = (flags & OMX_BUFFERFLAG_EOS)
+ ? C2FrameData::FLAG_END_OF_STREAM : 0;
+ std::shared_ptr<C2GraphicBlock> block;
+
+ C2Handle *handle = nullptr;
+ if (omxBuf.mBufferType == OMXBuffer::kBufferTypeANWBuffer
+ && omxBuf.mGraphicBuffer != nullptr) {
+ // Clone the gralloc handle and re-wrap it as a Codec2 graphic block.
+ std::shared_ptr<C2GraphicAllocation> alloc;
+ handle = WrapNativeCodec2GrallocHandle(
+ native_handle_clone(omxBuf.mGraphicBuffer->handle),
+ omxBuf.mGraphicBuffer->width,
+ omxBuf.mGraphicBuffer->height,
+ omxBuf.mGraphicBuffer->format,
+ omxBuf.mGraphicBuffer->usage,
+ omxBuf.mGraphicBuffer->stride);
+ // NOTE(review): c2_status_t compared to OK (works since C2_OK == 0),
+ // and the cloned handle is not freed on this error path — possible
+ // native-handle leak; confirm ownership semantics of the wrapper.
+ c2_status_t err = mAllocator->priorGraphicAllocation(handle, &alloc);
+ if (err != OK) {
+ return UNKNOWN_ERROR;
+ }
+ block = _C2BlockFactory::CreateGraphicBlock(alloc);
+ } else if (!(flags & OMX_BUFFERFLAG_EOS)) {
+ return BAD_VALUE;
+ }
+
+ std::unique_ptr<C2Work> work(new C2Work);
+ work->input.flags = (C2FrameData::flags_t)c2Flags;
+ work->input.ordinal.timestamp = timestamp;
+
+ // WORKAROUND: adjust timestamp based on gapUs
+ // (see mAdjustTimestampGapUs: >0 caps the gap, <0 forces a fixed gap)
+ {
+ work->input.ordinal.customOrdinal = timestamp; // save input timestamp
+ if (mFirstInputFrame) {
+ // grab timestamps on first frame
+ mPrevInputTimestamp = timestamp;
+ mPrevCodecTimestamp = timestamp;
+ mFirstInputFrame = false;
+ } else if (mAdjustTimestampGapUs > 0) {
+ // cap the codec-visible gap at mAdjustTimestampGapUs
+ work->input.ordinal.timestamp =
+ mPrevCodecTimestamp
+ + c2_min((timestamp - mPrevInputTimestamp).peek(), mAdjustTimestampGapUs);
+ } else if (mAdjustTimestampGapUs < 0) {
+ // fixed gap: subtracting the negative value adds |gap| each frame
+ work->input.ordinal.timestamp = mPrevCodecTimestamp - mAdjustTimestampGapUs;
+ }
+ mPrevInputTimestamp = work->input.ordinal.customOrdinal;
+ mPrevCodecTimestamp = work->input.ordinal.timestamp;
+ ALOGV("adjusting %lld to %lld (gap=%lld)",
+ work->input.ordinal.customOrdinal.peekll(),
+ work->input.ordinal.timestamp.peekll(),
+ (long long)mAdjustTimestampGapUs);
+ }
+
+ work->input.ordinal.frameIndex = mFrameIndex++;
+ work->input.buffers.clear();
+ if (block) {
+ // The deleter returns the OMX buffer to the source when the codec is
+ // done with the C2Buffer; `source` is captured to keep it alive.
+ std::shared_ptr<C2Buffer> c2Buffer(
+ // TODO: fence
+ new Buffer2D(block->share(
+ C2Rect(block->width(), block->height()), ::C2Fence())),
+ [buffer, source = getSource()](C2Buffer *ptr) {
+ delete ptr;
+ // TODO: fence
+ (void)source->onInputBufferEmptied(buffer, -1);
+ });
+ work->input.buffers.push_back(c2Buffer);
+ }
+ work->worklets.clear();
+ work->worklets.emplace_back(new C2Worklet);
+ std::list<std::unique_ptr<C2Work>> items;
+ items.push_back(std::move(work));
+
+ c2_status_t err = comp->queue(&items);
+ if (err != C2_OK) {
+ return UNKNOWN_ERROR;
+ }
+
+ return OK;
+}
+
+// Not supported; reports OMX_IndexMax as the (invalid) extension index.
+status_t C2OMXNode::getExtensionIndex(
+ const char *parameterName, OMX_INDEXTYPE *index) {
+ (void)parameterName;
+ *index = OMX_IndexMax;
+ return ERROR_UNSUPPORTED;
+}
+
+// Handles only OMX_EventDataSpaceChanged events; currently just logs the new
+// dataspace/pixel format (data1 carries the dataspace, data3 the format).
+status_t C2OMXNode::dispatchMessage(const omx_message& msg) {
+ if (msg.type != omx_message::EVENT) {
+ return ERROR_UNSUPPORTED;
+ }
+ if (msg.u.event_data.event != OMX_EventDataSpaceChanged) {
+ return ERROR_UNSUPPORTED;
+ }
+ android_dataspace dataSpace = (android_dataspace)msg.u.event_data.data1;
+ uint32_t pixelFormat = msg.u.event_data.data3;
+
+ // TODO: set dataspace on component to see if it impacts color aspects
+ ALOGD("dataspace changed to %#x pixel format: %#x", dataSpace, pixelFormat);
+ return OK;
+}
+
+// Returns the buffer source set via setInputSurface() (nullptr before that).
+sp<IOMXBufferSource> C2OMXNode::getSource() {
+ return mBufferSource;
+}
+
+// Records the frame size later reported via
+// getParameter(OMX_IndexParamPortDefinition).
+void C2OMXNode::setFrameSize(uint32_t width, uint32_t height) {
+ mWidth = width;
+ mHeight = height;
+}
+
+} // namespace android
diff --git a/media/codec2/sfplugin/C2OMXNode.h b/media/codec2/sfplugin/C2OMXNode.h
new file mode 100644
index 0000000..b5a815e
--- /dev/null
+++ b/media/codec2/sfplugin/C2OMXNode.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2018, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef C2_OMX_NODE_H_
+#define C2_OMX_NODE_H_
+
+#include <atomic>
+
+#include <android/IOMXBufferSource.h>
+#include <media/IOMX.h>
+#include <media/OMXBuffer.h>
+#include <codec2/hidl/client.h>
+
+namespace android {
+
+/**
+ * IOmxNode implementation around codec 2.0 component, only to be used in
+ * IGraphicBufferSource::configure. Only subset of IOmxNode API is implemented
+ * and others are left as stub. As a result, one cannot expect this IOmxNode
+ * to work in any other usage than IGraphicBufferSource.
+ */
+struct C2OMXNode : public BnOMXNode {
+ explicit C2OMXNode(const std::shared_ptr<Codec2Client::Component> &comp);
+ ~C2OMXNode() override = default;
+
+ // IOMXNode
+ status_t freeNode() override;
+ status_t sendCommand(OMX_COMMANDTYPE cmd, OMX_S32 param) override;
+ status_t getParameter(
+ OMX_INDEXTYPE index, void *params, size_t size) override;
+ status_t setParameter(
+ OMX_INDEXTYPE index, const void *params, size_t size) override;
+ status_t getConfig(
+ OMX_INDEXTYPE index, void *params, size_t size) override;
+ status_t setConfig(
+ OMX_INDEXTYPE index, const void *params, size_t size) override;
+ status_t setPortMode(OMX_U32 port_index, IOMX::PortMode mode) override;
+ status_t prepareForAdaptivePlayback(
+ OMX_U32 portIndex, OMX_BOOL enable,
+ OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight) override;
+ status_t configureVideoTunnelMode(
+ OMX_U32 portIndex, OMX_BOOL tunneled,
+ OMX_U32 audioHwSync, native_handle_t **sidebandHandle) override;
+ status_t getGraphicBufferUsage(
+ OMX_U32 port_index, OMX_U32* usage) override;
+ status_t setInputSurface(
+ const sp<IOMXBufferSource> &bufferSource) override;
+ status_t allocateSecureBuffer(
+ OMX_U32 port_index, size_t size, buffer_id *buffer,
+ void **buffer_data, sp<NativeHandle> *native_handle) override;
+ status_t useBuffer(
+ OMX_U32 port_index, const OMXBuffer &omxBuf, buffer_id *buffer) override;
+ status_t freeBuffer(
+ OMX_U32 port_index, buffer_id buffer) override;
+ status_t fillBuffer(
+ buffer_id buffer, const OMXBuffer &omxBuf, int fenceFd) override;
+ status_t emptyBuffer(
+ buffer_id buffer, const OMXBuffer &omxBuf,
+ OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) override;
+ status_t getExtensionIndex(
+ const char *parameter_name,
+ OMX_INDEXTYPE *index) override;
+ status_t dispatchMessage(const omx_message &msg) override;
+
+ // Returns the buffer source set via setInputSurface() (may be null).
+ sp<IOMXBufferSource> getSource();
+ // Sets the frame size reported from getParameter(OMX_IndexParamPortDefinition).
+ void setFrameSize(uint32_t width, uint32_t height);
+
+private:
+ std::weak_ptr<Codec2Client::Component> mComp; // wrapped Codec2 component (not owned)
+ sp<IOMXBufferSource> mBufferSource; // source set in setInputSurface()
+ std::shared_ptr<C2Allocator> mAllocator; // gralloc allocator for wrapping ANW buffers
+ std::atomic_uint64_t mFrameIndex; // monotonically increasing work frame index
+ uint32_t mWidth;
+ uint32_t mHeight;
+ uint64_t mUsage; // gralloc usage advertised via OMX_IndexParamConsumerUsageBits
+
+ // WORKAROUND: timestamp adjustment
+
+ // if >0: this is the max timestamp gap, if <0: this is -1 times the fixed timestamp gap
+ // if 0: no timestamp adjustment is made
+ // note that C2OMXNode can be recycled between encoding sessions.
+ int32_t mAdjustTimestampGapUs;
+ bool mFirstInputFrame; // true for first input
+ c2_cntr64_t mPrevInputTimestamp; // input timestamp for previous frame
+ c2_cntr64_t mPrevCodecTimestamp; // adjusted (codec) timestamp for previous frame
+};
+
+} // namespace android
+
+#endif // C2_OMX_NODE_H_
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
new file mode 100644
index 0000000..f00c62e
--- /dev/null
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -0,0 +1,1746 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "CCodec"
+#include <utils/Log.h>
+
+#include <sstream>
+#include <thread>
+
+#include <C2Config.h>
+#include <C2Debug.h>
+#include <C2ParamInternal.h>
+#include <C2PlatformSupport.h>
+
+#include <android/IGraphicBufferSource.h>
+#include <android/IOMXBufferSource.h>
+#include <android/hardware/media/omx/1.0/IGraphicBufferSource.h>
+#include <android/hardware/media/omx/1.0/IOmx.h>
+#include <android-base/stringprintf.h>
+#include <cutils/properties.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/Surface.h>
+#include <gui/bufferqueue/1.0/H2BGraphicBufferProducer.h>
+#include <media/omx/1.0/WGraphicBufferSource.h>
+#include <media/openmax/OMX_IndexExt.h>
+#include <media/stagefright/BufferProducerWrapper.h>
+#include <media/stagefright/MediaCodecConstants.h>
+#include <media/stagefright/PersistentSurface.h>
+#include <media/stagefright/codec2/1.0/InputSurface.h>
+
+#include "C2OMXNode.h"
+#include "CCodec.h"
+#include "CCodecBufferChannel.h"
+#include "InputSurfaceWrapper.h"
+
+extern "C" android::PersistentSurface *CreateInputSurface();
+
+namespace android {
+
+using namespace std::chrono_literals;
+using ::android::hardware::graphics::bufferqueue::V1_0::utils::H2BGraphicBufferProducer;
+using android::base::StringPrintf;
+using BGraphicBufferSource = ::android::IGraphicBufferSource;
+using ::hardware::google::media::c2::V1_0::IInputSurface;
+
+namespace {
+
+// Process-wide singleton that periodically checks registered codecs and asks
+// each to release itself if it appears stuck (initiateReleaseIfStuck).
+class CCodecWatchdog : public AHandler {
+private:
+ enum {
+ kWhatWatch,
+ };
+ constexpr static int64_t kWatchIntervalUs = 3300000; // 3.3 secs
+
+public:
+ static sp<CCodecWatchdog> getInstance() {
+ static sp<CCodecWatchdog> instance(new CCodecWatchdog);
+ static std::once_flag flag;
+ // Call Init() only once.
+ std::call_once(flag, Init, instance);
+ return instance;
+ }
+
+ ~CCodecWatchdog() = default;
+
+ // Registers a codec for the next watch pass. Codecs are held weakly so the
+ // watchdog never extends a codec's lifetime.
+ void watch(sp<CCodec> codec) {
+ bool shouldPost = false;
+ {
+ Mutexed<std::set<wp<CCodec>>>::Locked codecs(mCodecsToWatch);
+ // If a watch message is in flight, piggy-back this instance as well.
+ // Otherwise, post a new watch message.
+ shouldPost = codecs->empty();
+ codecs->emplace(codec);
+ }
+ if (shouldPost) {
+ ALOGV("posting watch message");
+ (new AMessage(kWhatWatch, this))->post(kWatchIntervalUs);
+ }
+ }
+
+protected:
+ void onMessageReceived(const sp<AMessage> &msg) {
+ switch (msg->what()) {
+ case kWhatWatch: {
+ Mutexed<std::set<wp<CCodec>>>::Locked codecs(mCodecsToWatch);
+ ALOGV("watch for %zu codecs", codecs->size());
+ for (auto it = codecs->begin(); it != codecs->end(); ++it) {
+ // Skip codecs that have already been destroyed.
+ sp<CCodec> codec = it->promote();
+ if (codec == nullptr) {
+ continue;
+ }
+ codec->initiateReleaseIfStuck();
+ }
+ codecs->clear();
+ break;
+ }
+
+ default: {
+ TRESPASS("CCodecWatchdog: unrecognized message");
+ }
+ }
+ }
+
+private:
+ CCodecWatchdog() : mLooper(new ALooper) {}
+
+ // One-time looper setup, deferred out of the constructor so the handler can
+ // be registered on a fully-constructed instance.
+ static void Init(const sp<CCodecWatchdog> &thiz) {
+ ALOGV("Init");
+ thiz->mLooper->setName("CCodecWatchdog");
+ thiz->mLooper->registerHandler(thiz);
+ thiz->mLooper->start();
+ }
+
+ sp<ALooper> mLooper;
+
+ Mutexed<std::set<wp<CCodec>>> mCodecsToWatch;
+};
+
+// InputSurfaceWrapper backed by a Codec2 HIDL input surface.
+class C2InputSurfaceWrapper : public InputSurfaceWrapper {
+public:
+ explicit C2InputSurfaceWrapper(
+ const std::shared_ptr<Codec2Client::InputSurface> &surface) :
+ mSurface(surface) {
+ }
+
+ ~C2InputSurfaceWrapper() override = default;
+
+ // Connects the surface to the component; at most one connection at a time.
+ status_t connect(const std::shared_ptr<Codec2Client::Component> &comp) override {
+ if (mConnection != nullptr) {
+ return ALREADY_EXISTS;
+ }
+ return toStatusT(mSurface->connectToComponent(comp, &mConnection),
+ C2_OPERATION_InputSurface_connectToComponent);
+ }
+
+ void disconnect() override {
+ if (mConnection != nullptr) {
+ mConnection->disconnect();
+ mConnection = nullptr;
+ }
+ }
+
+ status_t start() override {
+ // InputSurface does not distinguish started state
+ return OK;
+ }
+
+ // Signals EOS by setting the input-surface EOS tuning on the configurable.
+ status_t signalEndOfInputStream() override {
+ C2InputSurfaceEosTuning eos(true);
+ std::vector<std::unique_ptr<C2SettingResult>> failures;
+ c2_status_t err = mSurface->getConfigurable()->config({&eos}, C2_MAY_BLOCK, &failures);
+ if (err != C2_OK) {
+ return UNKNOWN_ERROR;
+ }
+ return OK;
+ }
+
+ // NOTE(review): not marked `override` — presumably overrides
+ // InputSurfaceWrapper::configure; consider adding the keyword.
+ status_t configure(Config &config __unused) {
+ // TODO
+ return OK;
+ }
+
+private:
+ std::shared_ptr<Codec2Client::InputSurface> mSurface;
+ std::shared_ptr<Codec2Client::InputSurfaceConnection> mConnection;
+};
+
+// InputSurfaceWrapper backed by the legacy (OMX) GraphicBufferSource; the
+// source is attached to the Codec2 component through a C2OMXNode shim.
+class GraphicBufferSourceWrapper : public InputSurfaceWrapper {
+public:
+// explicit GraphicBufferSourceWrapper(const sp<BGraphicBufferSource> &source) : mSource(source) {}
+ GraphicBufferSourceWrapper(
+ const sp<BGraphicBufferSource> &source,
+ uint32_t width,
+ uint32_t height)
+ : mSource(source), mWidth(width), mHeight(height) {
+ mDataSpace = HAL_DATASPACE_BT709;
+ }
+ ~GraphicBufferSourceWrapper() override = default;
+
+ status_t connect(const std::shared_ptr<Codec2Client::Component> &comp) override {
+ mNode = new C2OMXNode(comp);
+ mNode->setFrameSize(mWidth, mHeight);
+
+ // NOTE: we do not use/pass through color aspects from GraphicBufferSource as we
+ // communicate that directly to the component.
+ mSource->configure(mNode, mDataSpace);
+ return OK;
+ }
+
+ void disconnect() override {
+ if (mNode == nullptr) {
+ return;
+ }
+ sp<IOMXBufferSource> source = mNode->getSource();
+ if (source == nullptr) {
+ ALOGD("GBSWrapper::disconnect: node is not configured with OMXBufferSource.");
+ return;
+ }
+ // Mimic OMX state transitions so the source stops/releases its buffers.
+ source->onOmxIdle();
+ source->onOmxLoaded();
+ mNode.clear();
+ }
+
+ // Flattens a binder::Status into a status_t, preferring the
+ // service-specific error, then the transaction error.
+ status_t GetStatus(const binder::Status &status) {
+ status_t err = OK;
+ if (!status.isOk()) {
+ err = status.serviceSpecificErrorCode();
+ if (err == OK) {
+ err = status.transactionError();
+ if (err == OK) {
+ // binder status failed, but there is no service or transaction error
+ err = UNKNOWN_ERROR;
+ }
+ }
+ }
+ return err;
+ }
+
+ status_t start() override {
+ sp<IOMXBufferSource> source = mNode->getSource();
+ if (source == nullptr) {
+ return NO_INIT;
+ }
+ // Matches the nBufferCountActual advertised by C2OMXNode::getParameter.
+ constexpr size_t kNumSlots = 16;
+ for (size_t i = 0; i < kNumSlots; ++i) {
+ source->onInputBufferAdded(i);
+ }
+
+ source->onOmxExecuting();
+ return OK;
+ }
+
+ status_t signalEndOfInputStream() override {
+ return GetStatus(mSource->signalEndOfInputStream());
+ }
+
+ // NOTE(review): not marked `override` — presumably overrides
+ // InputSurfaceWrapper::configure; consider adding the keyword.
+ status_t configure(Config &config) {
+ std::stringstream status;
+ status_t err = OK;
+
+ // handle each configuration granularly, in case we need to handle part of the configuration
+ // elsewhere
+
+ // TRICKY: we do not unset frame delay repeating
+ if (config.mMinFps > 0 && config.mMinFps != mConfig.mMinFps) {
+ int64_t us = 1e6 / config.mMinFps + 0.5;
+ status_t res = GetStatus(mSource->setRepeatPreviousFrameDelayUs(us));
+ status << " minFps=" << config.mMinFps << " => repeatDelayUs=" << us;
+ if (res != OK) {
+ status << " (=> " << asString(res) << ")";
+ err = res;
+ }
+ mConfig.mMinFps = config.mMinFps;
+ }
+
+ // pts gap
+ // (positive = max gap, negative = fixed gap; consumed by C2OMXNode)
+ if (config.mMinAdjustedFps > 0 || config.mFixedAdjustedFps > 0) {
+ if (mNode != nullptr) {
+ OMX_PARAM_U32TYPE ptrGapParam = {};
+ ptrGapParam.nSize = sizeof(OMX_PARAM_U32TYPE);
+ ptrGapParam.nU32 = (config.mMinAdjustedFps > 0)
+ ? c2_min(INT32_MAX + 0., 1e6 / config.mMinAdjustedFps + 0.5)
+ : c2_max(0. - INT32_MAX, -1e6 / config.mFixedAdjustedFps - 0.5);
+ (void)mNode->setParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamMaxFrameDurationForBitrateControl,
+ &ptrGapParam, sizeof(ptrGapParam));
+ }
+ }
+
+ // max fps
+ // TRICKY: we do not unset max fps to 0 unless using fixed fps
+ if ((config.mMaxFps > 0 || (config.mFixedAdjustedFps > 0 && config.mMaxFps == 0))
+ && config.mMaxFps != mConfig.mMaxFps) {
+ status_t res = GetStatus(mSource->setMaxFps(config.mMaxFps));
+ status << " maxFps=" << config.mMaxFps;
+ if (res != OK) {
+ status << " (=> " << asString(res) << ")";
+ err = res;
+ }
+ mConfig.mMaxFps = config.mMaxFps;
+ }
+
+ if (config.mTimeOffsetUs != mConfig.mTimeOffsetUs) {
+ status_t res = GetStatus(mSource->setTimeOffsetUs(config.mTimeOffsetUs));
+ status << " timeOffset " << config.mTimeOffsetUs << "us";
+ if (res != OK) {
+ status << " (=> " << asString(res) << ")";
+ err = res;
+ }
+ mConfig.mTimeOffsetUs = config.mTimeOffsetUs;
+ }
+
+ if (config.mCaptureFps != mConfig.mCaptureFps || config.mCodedFps != mConfig.mCodedFps) {
+ status_t res =
+ GetStatus(mSource->setTimeLapseConfig(config.mCodedFps, config.mCaptureFps));
+ status << " timeLapse " << config.mCaptureFps << "fps as " << config.mCodedFps << "fps";
+ if (res != OK) {
+ status << " (=> " << asString(res) << ")";
+ err = res;
+ }
+ mConfig.mCaptureFps = config.mCaptureFps;
+ mConfig.mCodedFps = config.mCodedFps;
+ }
+
+ // start time is (re)applied when it changes or when resuming from stop
+ if (config.mStartAtUs != mConfig.mStartAtUs
+ || (config.mStopped != mConfig.mStopped && !config.mStopped)) {
+ status_t res = GetStatus(mSource->setStartTimeUs(config.mStartAtUs));
+ status << " start at " << config.mStartAtUs << "us";
+ if (res != OK) {
+ status << " (=> " << asString(res) << ")";
+ err = res;
+ }
+ mConfig.mStartAtUs = config.mStartAtUs;
+ mConfig.mStopped = config.mStopped;
+ }
+
+ // suspend-resume
+ if (config.mSuspended != mConfig.mSuspended) {
+ status_t res = GetStatus(mSource->setSuspend(config.mSuspended, config.mSuspendAtUs));
+ status << " " << (config.mSuspended ? "suspend" : "resume")
+ << " at " << config.mSuspendAtUs << "us";
+ if (res != OK) {
+ status << " (=> " << asString(res) << ")";
+ err = res;
+ }
+ mConfig.mSuspended = config.mSuspended;
+ mConfig.mSuspendAtUs = config.mSuspendAtUs;
+ }
+
+ // stopping also queries the resulting input delay back into config
+ if (config.mStopped != mConfig.mStopped && config.mStopped) {
+ status_t res = GetStatus(mSource->setStopTimeUs(config.mStopAtUs));
+ status << " stop at " << config.mStopAtUs << "us";
+ if (res != OK) {
+ status << " (=> " << asString(res) << ")";
+ err = res;
+ } else {
+ status << " delayUs";
+ res = GetStatus(mSource->getStopTimeOffsetUs(&config.mInputDelayUs));
+ if (res != OK) {
+ status << " (=> " << asString(res) << ")";
+ } else {
+ status << "=" << config.mInputDelayUs << "us";
+ }
+ mConfig.mInputDelayUs = config.mInputDelayUs;
+ }
+ mConfig.mStopAtUs = config.mStopAtUs;
+ mConfig.mStopped = config.mStopped;
+ }
+
+ // color aspects (android._color-aspects)
+
+ // consumer usage
+ ALOGD("ISConfig%s", status.str().c_str());
+ return err;
+ }
+
+private:
+ sp<BGraphicBufferSource> mSource;
+ sp<C2OMXNode> mNode;
+ uint32_t mWidth;
+ uint32_t mHeight;
+ Config mConfig; // last applied configuration, used for change detection
+};
+
+// Adapts a Codec2Client (HIDL store) to the in-process C2ComponentStore
+// interface so it can be installed via SetPreferredCodec2ComponentStore.
+// Only the query/config surface is forwarded; component creation, buffer
+// copying and listing are deliberately omitted (C2_OMITTED).
+class Codec2ClientInterfaceWrapper : public C2ComponentStore {
+ std::shared_ptr<Codec2Client> mClient;
+
+public:
+ Codec2ClientInterfaceWrapper(std::shared_ptr<Codec2Client> client)
+ : mClient(client) { }
+
+ virtual ~Codec2ClientInterfaceWrapper() = default;
+
+ virtual c2_status_t config_sm(
+ const std::vector<C2Param *> &params,
+ std::vector<std::unique_ptr<C2SettingResult>> *const failures) {
+ return mClient->config(params, C2_MAY_BLOCK, failures);
+ };
+
+ virtual c2_status_t copyBuffer(
+ std::shared_ptr<C2GraphicBuffer>,
+ std::shared_ptr<C2GraphicBuffer>) {
+ return C2_OMITTED;
+ }
+
+ virtual c2_status_t createComponent(
+ C2String, std::shared_ptr<C2Component> *const component) {
+ component->reset();
+ return C2_OMITTED;
+ }
+
+ virtual c2_status_t createInterface(
+ C2String, std::shared_ptr<C2ComponentInterface> *const interface) {
+ interface->reset();
+ return C2_OMITTED;
+ }
+
+ virtual c2_status_t query_sm(
+ const std::vector<C2Param *> &stackParams,
+ const std::vector<C2Param::Index> &heapParamIndices,
+ std::vector<std::unique_ptr<C2Param>> *const heapParams) const {
+ return mClient->query(stackParams, heapParamIndices, C2_MAY_BLOCK, heapParams);
+ }
+
+ virtual c2_status_t querySupportedParams_nb(
+ std::vector<std::shared_ptr<C2ParamDescriptor>> *const params) const {
+ return mClient->querySupportedParams(params);
+ }
+
+ virtual c2_status_t querySupportedValues_sm(
+ std::vector<C2FieldSupportedValuesQuery> &fields) const {
+ return mClient->querySupportedValues(fields, C2_MAY_BLOCK);
+ }
+
+ virtual C2String getName() const {
+ return mClient->getName();
+ }
+
+ virtual std::shared_ptr<C2ParamReflector> getParamReflector() const {
+ return mClient->getParamReflector();
+ }
+
+ virtual std::vector<std::shared_ptr<const C2Component::Traits>> listComponents() {
+ return std::vector<std::shared_ptr<const C2Component::Traits>>();
+ }
+};
+
+} // namespace
+
+// CCodec::ClientListener
+
+// Forwards Codec2Client callbacks to the owning CCodec. Holds the codec
+// weakly so a late callback after codec destruction is silently dropped.
+struct CCodec::ClientListener : public Codec2Client::Listener {
+
+ explicit ClientListener(const wp<CCodec> &codec) : mCodec(codec) {}
+
+ virtual void onWorkDone(
+ const std::weak_ptr<Codec2Client::Component>& component,
+ std::list<std::unique_ptr<C2Work>>& workItems,
+ size_t numDiscardedInputBuffers) override {
+ (void)component;
+ sp<CCodec> codec(mCodec.promote());
+ if (!codec) {
+ return;
+ }
+ codec->onWorkDone(workItems, numDiscardedInputBuffers);
+ }
+
+ virtual void onTripped(
+ const std::weak_ptr<Codec2Client::Component>& component,
+ const std::vector<std::shared_ptr<C2SettingResult>>& settingResult
+ ) override {
+ // TODO
+ (void)component;
+ (void)settingResult;
+ }
+
+ virtual void onError(
+ const std::weak_ptr<Codec2Client::Component>& component,
+ uint32_t errorCode) override {
+ // TODO
+ (void)component;
+ (void)errorCode;
+ }
+
+ // Component (service) death: log it and surface a fatal error to MediaCodec.
+ virtual void onDeath(
+ const std::weak_ptr<Codec2Client::Component>& component) override {
+ { // Log the death of the component.
+ std::shared_ptr<Codec2Client::Component> comp = component.lock();
+ if (!comp) {
+ ALOGE("Codec2 component died.");
+ } else {
+ ALOGE("Codec2 component \"%s\" died.", comp->getName().c_str());
+ }
+ }
+
+ // Report to MediaCodec.
+ sp<CCodec> codec(mCodec.promote());
+ if (!codec || !codec->mCallback) {
+ return;
+ }
+ codec->mCallback->onError(DEAD_OBJECT, ACTION_CODE_FATAL);
+ }
+
+ virtual void onFramesRendered(
+ const std::vector<RenderedFrame>& renderedFrames) override {
+ // TODO
+ (void)renderedFrames;
+ }
+
+ virtual void onInputBufferDone(
+ const std::shared_ptr<C2Buffer>& buffer) override {
+ sp<CCodec> codec(mCodec.promote());
+ if (codec) {
+ codec->onInputBufferDone(buffer);
+ }
+ }
+
+private:
+ wp<CCodec> mCodec;
+};
+
+// CCodecCallbackImpl
+
+// Bridges CCodecBufferChannel callbacks back into CCodec / MediaCodec.
+// NOTE(review): holds a raw CCodec pointer — the channel must not outlive
+// the codec; confirm the ownership relationship in CCodec's ctor.
+class CCodecCallbackImpl : public CCodecCallback {
+public:
+ explicit CCodecCallbackImpl(CCodec *codec) : mCodec(codec) {}
+ ~CCodecCallbackImpl() override = default;
+
+ void onError(status_t err, enum ActionCode actionCode) override {
+ mCodec->mCallback->onError(err, actionCode);
+ }
+
+ void onOutputFramesRendered(int64_t mediaTimeUs, nsecs_t renderTimeNs) override {
+ mCodec->mCallback->onOutputFramesRendered(
+ {RenderedFrameInfo(mediaTimeUs, renderTimeNs)});
+ }
+
+ void onWorkQueued(bool eos) override {
+ mCodec->onWorkQueued(eos);
+ }
+
+ void onOutputBuffersChanged() override {
+ mCodec->mCallback->onOutputBuffersChanged();
+ }
+
+private:
+ CCodec *mCodec;
+};
+
+// CCodec
+
+// Creates the buffer channel up front; the component itself is allocated
+// later via initiateAllocateComponent().
+CCodec::CCodec()
+ : mChannel(new CCodecBufferChannel(std::make_shared<CCodecCallbackImpl>(this))),
+ mQueuedWorkCount(0) {
+}
+
+CCodec::~CCodec() {
+}
+
+// Exposes the channel created in the constructor to MediaCodec.
+std::shared_ptr<BufferChannelBase> CCodec::getBufferChannel() {
+ return mChannel;
+}
+
+// Runs `job` and, on failure, reports a fatal error to MediaCodec before
+// returning the error.
+// NOTE(review): `err` is a status_t compared against C2_OK — correct only
+// because C2_OK == OK == 0; consider comparing against OK for clarity.
+status_t CCodec::tryAndReportOnError(std::function<status_t()> job) {
+ status_t err = job();
+ if (err != C2_OK) {
+ mCallback->onError(err, ACTION_CODE_FATAL);
+ }
+ return err;
+}
+
+// Transitions RELEASED -> ALLOCATING and posts the actual allocation to the
+// looper thread (handled as kWhatAllocate). Errors are reported via callback.
+void CCodec::initiateAllocateComponent(const sp<AMessage> &msg) {
+ auto setAllocating = [this] {
+ Mutexed<State>::Locked state(mState);
+ if (state->get() != RELEASED) {
+ return INVALID_OPERATION;
+ }
+ state->set(ALLOCATING);
+ return OK;
+ };
+ if (tryAndReportOnError(setAllocating) != OK) {
+ return;
+ }
+
+ sp<RefBase> codecInfo;
+ CHECK(msg->findObject("codecInfo", &codecInfo));
+ // For Codec 2.0 components, componentName == codecInfo->getCodecName().
+
+ sp<AMessage> allocMsg(new AMessage(kWhatAllocate, this));
+ allocMsg->setObject("codecInfo", codecInfo);
+ allocMsg->post();
+}
+
+// Creates the Codec2 component named by codecInfo, installs the default
+// (vendor) store as the preferred parameter store, wires the component into
+// the buffer channel, and initializes configuration. On any failure the
+// state returns to RELEASED and a fatal error is reported.
+void CCodec::allocate(const sp<MediaCodecInfo> &codecInfo) {
+ if (codecInfo == nullptr) {
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ return;
+ }
+ ALOGD("allocate(%s)", codecInfo->getCodecName());
+ mClientListener.reset(new ClientListener(this));
+
+ AString componentName = codecInfo->getCodecName();
+ std::shared_ptr<Codec2Client> client;
+
+ // set up preferred component store to access vendor store parameters
+ client = Codec2Client::CreateFromService("default", false);
+ if (client) {
+ ALOGI("setting up '%s' as default (vendor) store", client->getInstanceName().c_str());
+ SetPreferredCodec2ComponentStore(
+ std::make_shared<Codec2ClientInterfaceWrapper>(client));
+ }
+
+ std::shared_ptr<Codec2Client::Component> comp =
+ Codec2Client::CreateComponentByName(
+ componentName.c_str(),
+ mClientListener,
+ &client);
+ if (!comp) {
+ ALOGE("Failed Create component: %s", componentName.c_str());
+ Mutexed<State>::Locked state(mState);
+ state->set(RELEASED);
+ // Drop the state lock around the callback to avoid re-entrant deadlock.
+ state.unlock();
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ state.lock();
+ return;
+ }
+ ALOGI("Created component [%s]", componentName.c_str());
+ mChannel->setComponent(comp);
+ auto setAllocated = [this, comp, client] {
+ Mutexed<State>::Locked state(mState);
+ if (state->get() != ALLOCATING) {
+ state->set(RELEASED);
+ return UNKNOWN_ERROR;
+ }
+ state->set(ALLOCATED);
+ state->comp = comp;
+ mClient = client;
+ return OK;
+ };
+ if (tryAndReportOnError(setAllocated) != OK) {
+ return;
+ }
+
+ // initialize config here in case setParameters is called prior to configure
+ Mutexed<Config>::Locked config(mConfig);
+ status_t err = config->initialize(mClient, comp);
+ if (err != OK) {
+ ALOGW("Failed to initialize configuration support");
+ // TODO: report error once we complete implementation.
+ }
+ config->queryConfiguration(comp);
+
+ mCallback->onComponentAllocated(componentName.c_str());
+}
+
+void CCodec::initiateConfigureComponent(const sp<AMessage> &format) {
+ auto checkAllocated = [this] {
+ Mutexed<State>::Locked state(mState);
+ return (state->get() != ALLOCATED) ? UNKNOWN_ERROR : OK;
+ };
+ if (tryAndReportOnError(checkAllocated) != OK) {
+ return;
+ }
+
+ sp<AMessage> msg(new AMessage(kWhatConfigure, this));
+ msg->setMessage("format", format);
+ msg->post();
+}
+
// Applies the client's configure() format to the component. Runs on the
// looper thread (kWhatConfigure). Validates encoder/decoder direction,
// captures input-surface and color-format settings into mConfig, converts
// the SDK parameters to C2 params, pushes them to the component, and derives
// the input/output formats reported back through onComponentConfigured().
void CCodec::configure(const sp<AMessage> &msg) {
    std::shared_ptr<Codec2Client::Component> comp;
    auto checkAllocated = [this, &comp] {
        Mutexed<State>::Locked state(mState);
        if (state->get() != ALLOCATED) {
            state->set(RELEASED);
            return UNKNOWN_ERROR;
        }
        comp = state->comp;
        return OK;
    };
    if (tryAndReportOnError(checkAllocated) != OK) {
        return;
    }

    auto doConfig = [msg, comp, this]() -> status_t {
        AString mime;
        if (!msg->findString("mime", &mime)) {
            return BAD_VALUE;
        }

        int32_t encoder;
        if (!msg->findInt32("encoder", &encoder)) {
            encoder = false;
        }

        // TODO: read from intf()
        // Sanity check: the requested direction must match the component name
        // ("encoder" substring present iff configuring an encoder).
        if ((!encoder) != (comp->getName().find("encoder") == std::string::npos)) {
            return UNKNOWN_ERROR;
        }

        // Legacy metadata-buffer mode: only ANW buffers are supported.
        int32_t storeMeta;
        if (encoder
                && msg->findInt32("android._input-metadata-buffer-type", &storeMeta)
                && storeMeta != kMetadataBufferTypeInvalid) {
            if (storeMeta != kMetadataBufferTypeANWBuffer) {
                ALOGD("Only ANW buffers are supported for legacy metadata mode");
                return BAD_VALUE;
            }
            mChannel->setMetaMode(CCodecBufferChannel::MODE_ANW);
        }

        sp<RefBase> obj;
        sp<Surface> surface;
        if (msg->findObject("native-window", &obj)) {
            surface = static_cast<Surface *>(obj.get());
            setSurface(surface);
        }

        Mutexed<Config>::Locked config(mConfig);
        config->mUsingSurface = surface != nullptr;

        /*
         * Handle input surface configuration
         */
        if ((config->mDomain & (Config::IS_VIDEO | Config::IS_IMAGE))
                && (config->mDomain & Config::IS_ENCODER)) {
            config->mISConfig.reset(new InputSurfaceWrapper::Config{});
            {
                // repeat-previous-frame-after is a period in us; convert to
                // a minimum fps for the input surface.
                config->mISConfig->mMinFps = 0;
                int64_t value;
                if (msg->findInt64("repeat-previous-frame-after", &value) && value > 0) {
                    config->mISConfig->mMinFps = 1e6 / value;
                }
                (void)msg->findFloat("max-fps-to-encoder", &config->mISConfig->mMaxFps);
                config->mISConfig->mMinAdjustedFps = 0;
                config->mISConfig->mFixedAdjustedFps = 0;
                if (msg->findInt64("max-pts-gap-to-encoder", &value)) {
                    // Negative gap means a fixed adjusted frame rate; positive
                    // means a minimum adjusted frame rate.
                    if (value < 0 && value >= INT32_MIN) {
                        config->mISConfig->mFixedAdjustedFps = -1e6 / value;
                    } else if (value > 0 && value <= INT32_MAX) {
                        config->mISConfig->mMinAdjustedFps = 1e6 / value;
                    }
                }
            }

            {
                // Time-lapse/slow-motion capture: record the capture fps and
                // the fps the stream is coded at.
                double value;
                if (msg->findDouble("time-lapse-fps", &value)) {
                    config->mISConfig->mCaptureFps = value;
                    (void)msg->findAsFloat(KEY_FRAME_RATE, &config->mISConfig->mCodedFps);
                }
            }

            {
                config->mISConfig->mSuspended = false;
                config->mISConfig->mSuspendAtUs = -1;
                int32_t value;
                if (msg->findInt32("create-input-buffers-suspended", &value) && value) {
                    config->mISConfig->mSuspended = true;
                }
            }
        }

        /*
         * Handle desired color format.
         */
        if ((config->mDomain & (Config::IS_VIDEO | Config::IS_IMAGE))) {
            int32_t format = -1;
            if (!msg->findInt32(KEY_COLOR_FORMAT, &format)) {
                /*
                 * Also handle default color format (encoders require color format, so this is only
                 * needed for decoders.
                 */
                if (!(config->mDomain & Config::IS_ENCODER)) {
                    format = (surface == nullptr) ? COLOR_FormatYUV420Planar : COLOR_FormatSurface;
                }
            }

            if (format >= 0) {
                msg->setInt32("android._color-format", format);
            }
        }

        // Convert SDK-level keys into C2 params and push them to the component.
        std::vector<std::unique_ptr<C2Param>> configUpdate;
        status_t err = config->getConfigUpdateFromSdkParams(
                comp, msg, Config::IS_CONFIG, C2_DONT_BLOCK, &configUpdate);
        if (err != OK) {
            ALOGW("failed to convert configuration to c2 params");
        }
        err = config->setParameters(comp, configUpdate, C2_DONT_BLOCK);
        if (err != OK) {
            ALOGW("failed to configure c2 params");
            return err;
        }

        // Query memory usage and the component's suggested max input size.
        std::vector<std::unique_ptr<C2Param>> params;
        C2StreamUsageTuning::input usage(0u, 0u);
        C2StreamMaxBufferSizeInfo::input maxInputSize(0u, 0u);

        std::initializer_list<C2Param::Index> indices {
        };
        c2_status_t c2err = comp->query(
                { &usage, &maxInputSize },
                indices,
                C2_DONT_BLOCK,
                &params);
        if (c2err != C2_OK && c2err != C2_BAD_INDEX) {
            ALOGE("Failed to query component interface: %d", c2err);
            return UNKNOWN_ERROR;
        }
        if (params.size() != indices.size()) {
            ALOGE("Component returns wrong number of params: expected %zu actual %zu",
                    indices.size(), params.size());
            return UNKNOWN_ERROR;
        }
        if (usage && (usage.value & C2MemoryUsage::CPU_READ)) {
            config->mInputFormat->setInt32("using-sw-read-often", true);
        }

        // NOTE: we don't blindly use client specified input size if specified as clients
        // at times specify too small size. Instead, mimic the behavior from OMX, where the
        // client specified size is only used to ask for bigger buffers than component suggested
        // size.
        int32_t clientInputSize = 0;
        bool clientSpecifiedInputSize =
                msg->findInt32(KEY_MAX_INPUT_SIZE, &clientInputSize) && clientInputSize > 0;
        // TEMP: enforce minimum buffer size of 1MB for video decoders
        // and 16K / 4K for audio encoders/decoders
        if (maxInputSize.value == 0) {
            if (config->mDomain & Config::IS_AUDIO) {
                maxInputSize.value = encoder ? 16384 : 4096;
            } else if (!encoder) {
                maxInputSize.value = 1048576u;
            }
        }

        // verify that CSD fits into this size (if defined)
        if ((config->mDomain & Config::IS_DECODER) && maxInputSize.value > 0) {
            sp<ABuffer> csd;
            for (size_t ix = 0; msg->findBuffer(StringPrintf("csd-%zu", ix).c_str(), &csd); ++ix) {
                if (csd && csd->size() > maxInputSize.value) {
                    maxInputSize.value = csd->size();
                }
            }
        }

        // TODO: do this based on component requiring linear allocator for input
        if ((config->mDomain & Config::IS_DECODER) || (config->mDomain & Config::IS_AUDIO)) {
            if (clientSpecifiedInputSize) {
                // Warn that we're overriding client's max input size if necessary.
                if ((uint32_t)clientInputSize < maxInputSize.value) {
                    ALOGD("client requested max input size %d, which is smaller than "
                            "what component recommended (%u); overriding with component "
                            "recommendation.", clientInputSize, maxInputSize.value);
                    ALOGW("This behavior is subject to change. It is recommended that "
                            "app developers double check whether the requested "
                            "max input size is in reasonable range.");
                } else {
                    maxInputSize.value = clientInputSize;
                }
            }
            // Pass max input size on input format to the buffer channel (if supplied by the
            // component or by a default)
            if (maxInputSize.value) {
                config->mInputFormat->setInt32(
                        KEY_MAX_INPUT_SIZE,
                        (int32_t)(c2_min(maxInputSize.value, uint32_t(INT32_MAX))));
            }
        }

        if ((config->mDomain & (Config::IS_VIDEO | Config::IS_IMAGE))) {
            // propagate HDR static info to output format for both encoders and decoders
            // if component supports this info, we will update from component, but only the raw port,
            // so don't propagate if component already filled it in.
            sp<ABuffer> hdrInfo;
            if (msg->findBuffer(KEY_HDR_STATIC_INFO, &hdrInfo)
                    && !config->mOutputFormat->findBuffer(KEY_HDR_STATIC_INFO, &hdrInfo)) {
                config->mOutputFormat->setBuffer(KEY_HDR_STATIC_INFO, hdrInfo);
            }

            // Set desired color format from configuration parameter
            int32_t format;
            if (msg->findInt32("android._color-format", &format)) {
                if (config->mDomain & Config::IS_ENCODER) {
                    config->mInputFormat->setInt32(KEY_COLOR_FORMAT, format);
                } else {
                    config->mOutputFormat->setInt32(KEY_COLOR_FORMAT, format);
                }
            }
        }

        // propagate encoder delay and padding to output format
        if ((config->mDomain & Config::IS_DECODER) && (config->mDomain & Config::IS_AUDIO)) {
            int delay = 0;
            if (msg->findInt32("encoder-delay", &delay)) {
                config->mOutputFormat->setInt32("encoder-delay", delay);
            }
            int padding = 0;
            if (msg->findInt32("encoder-padding", &padding)) {
                config->mOutputFormat->setInt32("encoder-padding", padding);
            }
        }

        // set channel-mask
        if (config->mDomain & Config::IS_AUDIO) {
            int32_t mask;
            if (msg->findInt32(KEY_CHANNEL_MASK, &mask)) {
                if (config->mDomain & Config::IS_ENCODER) {
                    config->mInputFormat->setInt32(KEY_CHANNEL_MASK, mask);
                } else {
                    config->mOutputFormat->setInt32(KEY_CHANNEL_MASK, mask);
                }
            }
        }

        ALOGD("setup formats input: %s and output: %s",
                config->mInputFormat->debugString().c_str(),
                config->mOutputFormat->debugString().c_str());
        return OK;
    };
    if (tryAndReportOnError(doConfig) != OK) {
        return;
    }

    Mutexed<Config>::Locked config(mConfig);

    mCallback->onComponentConfigured(config->mInputFormat, config->mOutputFormat);
}
+
// Validates that an input surface may be created (component allocated and is
// an encoder), then defers the actual creation to the looper thread.
void CCodec::initiateCreateInputSurface() {
    status_t err = [this] {
        Mutexed<State>::Locked state(mState);
        if (state->get() != ALLOCATED) {
            return UNKNOWN_ERROR;
        }
        // TODO: read it from intf() properly.
        if (state->comp->getName().find("encoder") == std::string::npos) {
            // Input surfaces only make sense for encoders.
            return INVALID_OPERATION;
        }
        return OK;
    }();
    if (err != OK) {
        mCallback->onInputSurfaceCreationFailed(err);
        return;
    }

    (new AMessage(kWhatCreateInputSurface, this))->post();
}
+
// Creates a persistent input surface (Codec2 HIDL input surface when
// available, otherwise the OMX fallback from the global CreateInputSurface()),
// attaches it to this codec, and reports the resulting buffer producer to the
// client. Runs on the looper thread (kWhatCreateInputSurface).
void CCodec::createInputSurface() {
    status_t err;
    sp<IGraphicBufferProducer> bufferProducer;

    sp<AMessage> inputFormat;
    sp<AMessage> outputFormat;
    {
        Mutexed<Config>::Locked config(mConfig);
        inputFormat = config->mInputFormat;
        outputFormat = config->mOutputFormat;
    }

    std::shared_ptr<PersistentSurface> persistentSurface(CreateInputSurface());

    if (persistentSurface->getHidlTarget()) {
        // Codec2 path: the surface is a HIDL IInputSurface.
        sp<IInputSurface> inputSurface = IInputSurface::castFrom(
                persistentSurface->getHidlTarget());
        if (!inputSurface) {
            ALOGE("Corrupted input surface");
            mCallback->onInputSurfaceCreationFailed(UNKNOWN_ERROR);
            return;
        }
        err = setupInputSurface(std::make_shared<C2InputSurfaceWrapper>(
                std::make_shared<Codec2Client::InputSurface>(inputSurface)));
        bufferProducer = new H2BGraphicBufferProducer(inputSurface);
    } else {
        // OMX fallback path: wrap the legacy GraphicBufferSource, sized from
        // the configured output dimensions.
        int32_t width = 0;
        (void)outputFormat->findInt32("width", &width);
        int32_t height = 0;
        (void)outputFormat->findInt32("height", &height);
        err = setupInputSurface(std::make_shared<GraphicBufferSourceWrapper>(
                persistentSurface->getBufferSource(), width, height));
        bufferProducer = persistentSurface->getBufferProducer();
    }

    if (err != OK) {
        ALOGE("Failed to set up input surface: %d", err);
        mCallback->onInputSurfaceCreationFailed(err);
        return;
    }

    mCallback->onInputSurfaceCreated(
            inputFormat,
            outputFormat,
            new BufferProducerWrapper(bufferProducer));
}
+
// Common plumbing for both created and client-supplied input surfaces:
// updates the input format for surface mode, pushes the dataspace to the
// surface, attaches it to the buffer channel, applies any pending surface
// config, and starts the surface. Rolls the format change back on failure.
status_t CCodec::setupInputSurface(const std::shared_ptr<InputSurfaceWrapper> &surface) {
    Mutexed<Config>::Locked config(mConfig);
    config->mUsingSurface = true;

    // we are now using surface - apply default color aspects to input format - as well as
    // get dataspace
    bool inputFormatChanged = config->updateFormats(config->IS_INPUT);
    ALOGD("input format %s to %s",
            inputFormatChanged ? "changed" : "unchanged",
            config->mInputFormat->debugString().c_str());

    // configure dataspace
    static_assert(sizeof(int32_t) == sizeof(android_dataspace), "dataspace size mismatch");
    android_dataspace dataSpace = HAL_DATASPACE_UNKNOWN;
    (void)config->mInputFormat->findInt32("android._dataspace", (int32_t*)&dataSpace);
    surface->setDataSpace(dataSpace);

    status_t err = mChannel->setInputSurface(surface);
    if (err != OK) {
        // undo input format update
        config->mUsingSurface = false;
        (void)config->updateFormats(config->IS_INPUT);
        return err;
    }
    config->mInputSurface = surface;

    if (config->mISConfig) {
        surface->configure(*config->mISConfig);
    } else {
        ALOGD("ISConfig: no configuration");
    }

    return surface->start();
}
+
+void CCodec::initiateSetInputSurface(const sp<PersistentSurface> &surface) {
+ sp<AMessage> msg = new AMessage(kWhatSetInputSurface, this);
+ msg->setObject("surface", surface);
+ msg->post();
+}
+
// Attaches a client-supplied persistent input surface. Mirrors
// createInputSurface(): a Codec2 HIDL IInputSurface is wrapped directly,
// anything else goes through the legacy GraphicBufferSource wrapper. Runs on
// the looper thread (kWhatSetInputSurface).
void CCodec::setInputSurface(const sp<PersistentSurface> &surface) {
    sp<AMessage> inputFormat;
    sp<AMessage> outputFormat;
    {
        Mutexed<Config>::Locked config(mConfig);
        inputFormat = config->mInputFormat;
        outputFormat = config->mOutputFormat;
    }
    auto hidlTarget = surface->getHidlTarget();
    if (hidlTarget) {
        sp<IInputSurface> inputSurface =
                IInputSurface::castFrom(hidlTarget);
        if (!inputSurface) {
            ALOGE("Failed to set input surface: Corrupted surface.");
            mCallback->onInputSurfaceDeclined(UNKNOWN_ERROR);
            return;
        }
        status_t err = setupInputSurface(std::make_shared<C2InputSurfaceWrapper>(
                std::make_shared<Codec2Client::InputSurface>(inputSurface)));
        if (err != OK) {
            ALOGE("Failed to set up input surface: %d", err);
            mCallback->onInputSurfaceDeclined(err);
            return;
        }
    } else {
        // Legacy path: size the GraphicBufferSource from the configured
        // output dimensions.
        int32_t width = 0;
        (void)outputFormat->findInt32("width", &width);
        int32_t height = 0;
        (void)outputFormat->findInt32("height", &height);
        status_t err = setupInputSurface(std::make_shared<GraphicBufferSourceWrapper>(
                surface->getBufferSource(), width, height));
        if (err != OK) {
            ALOGE("Failed to set up input surface: %d", err);
            mCallback->onInputSurfaceDeclined(err);
            return;
        }
    }
    mCallback->onInputSurfaceAccepted(inputFormat, outputFormat);
}
+
+void CCodec::initiateStart() {
+ auto setStarting = [this] {
+ Mutexed<State>::Locked state(mState);
+ if (state->get() != ALLOCATED) {
+ return UNKNOWN_ERROR;
+ }
+ state->set(STARTING);
+ return OK;
+ };
+ if (tryAndReportOnError(setStarting) != OK) {
+ return;
+ }
+
+ (new AMessage(kWhatStart, this))->post();
+}
+
// Starts the component and the buffer channel, then moves STARTING -> RUNNING
// and requests the initial input buffers. Runs on the looper thread
// (kWhatStart). Any failure is reported to the client as fatal.
void CCodec::start() {
    std::shared_ptr<Codec2Client::Component> comp;
    auto checkStarting = [this, &comp] {
        Mutexed<State>::Locked state(mState);
        if (state->get() != STARTING) {
            return UNKNOWN_ERROR;
        }
        comp = state->comp;
        return OK;
    };
    if (tryAndReportOnError(checkStarting) != OK) {
        return;
    }

    c2_status_t err = comp->start();
    if (err != C2_OK) {
        // Map the c2_status_t into the status_t domain for the client.
        mCallback->onError(toStatusT(err, C2_OPERATION_Component_start),
                           ACTION_CODE_FATAL);
        return;
    }
    sp<AMessage> inputFormat;
    sp<AMessage> outputFormat;
    {
        Mutexed<Config>::Locked config(mConfig);
        inputFormat = config->mInputFormat;
        outputFormat = config->mOutputFormat;
    }
    status_t err2 = mChannel->start(inputFormat, outputFormat);
    if (err2 != OK) {
        mCallback->onError(err2, ACTION_CODE_FATAL);
        return;
    }

    auto setRunning = [this] {
        Mutexed<State>::Locked state(mState);
        if (state->get() != STARTING) {
            return UNKNOWN_ERROR;
        }
        state->set(RUNNING);
        return OK;
    };
    if (tryAndReportOnError(setRunning) != OK) {
        return;
    }
    mCallback->onStartCompleted();

    (void)mChannel->requestInitialInputBuffers();
}
+
+void CCodec::initiateShutdown(bool keepComponentAllocated) {
+ if (keepComponentAllocated) {
+ initiateStop();
+ } else {
+ initiateRelease();
+ }
+}
+
// Transitions to STOPPING (unless already stopped/released/stopping), halts
// the buffer channel, and posts kWhatStop for the component stop.
void CCodec::initiateStop() {
    {
        Mutexed<State>::Locked state(mState);
        if (state->get() == ALLOCATED
                || state->get() == RELEASED
                || state->get() == STOPPING
                || state->get() == RELEASING) {
            // We're already stopped, released, or doing it right now.
            // Unlock around the callback to avoid re-entrancy on mState.
            state.unlock();
            mCallback->onStopCompleted();
            state.lock();
            return;
        }
        state->set(STOPPING);
    }

    mChannel->stop();
    (new AMessage(kWhatStop, this))->post();
}
+
+void CCodec::stop() {
+ std::shared_ptr<Codec2Client::Component> comp;
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->get() == RELEASING) {
+ state.unlock();
+ // We're already stopped or release is in progress.
+ mCallback->onStopCompleted();
+ state.lock();
+ return;
+ } else if (state->get() != STOPPING) {
+ state.unlock();
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ state.lock();
+ return;
+ }
+ comp = state->comp;
+ }
+ status_t err = comp->stop();
+ if (err != C2_OK) {
+ // TODO: convert err into status_t
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ }
+
+ {
+ Mutexed<State>::Locked state(mState);
+ if (state->get() == STOPPING) {
+ state->set(ALLOCATED);
+ }
+ }
+ mCallback->onStopCompleted();
+}
+
// Transitions to RELEASING and performs the actual component release on a
// detached thread (release may block for a while). |sendCallback| lets the
// watchdog path release silently.
void CCodec::initiateRelease(bool sendCallback /* = true */) {
    {
        Mutexed<State>::Locked state(mState);
        if (state->get() == RELEASED || state->get() == RELEASING) {
            // We're already released or doing it right now.
            if (sendCallback) {
                state.unlock();
                mCallback->onReleaseCompleted();
                state.lock();
            }
            return;
        }
        if (state->get() == ALLOCATING) {
            state->set(RELEASING);
            // With the altered state allocate() would fail and clean up.
            if (sendCallback) {
                state.unlock();
                mCallback->onReleaseCompleted();
                state.lock();
            }
            return;
        }
        state->set(RELEASING);
    }

    mChannel->stop();
    // thiz holds strong ref to this while the thread is running.
    sp<CCodec> thiz(this);
    std::thread([thiz, sendCallback] { thiz->release(sendCallback); }).detach();
}
+
// Releases the component (runs on the detached thread spawned by
// initiateRelease) and moves the state machine to RELEASED.
void CCodec::release(bool sendCallback) {
    std::shared_ptr<Codec2Client::Component> comp;
    {
        Mutexed<State>::Locked state(mState);
        if (state->get() == RELEASED) {
            if (sendCallback) {
                state.unlock();
                mCallback->onReleaseCompleted();
                state.lock();
            }
            return;
        }
        comp = state->comp;
    }
    // Release outside the state lock; this call may block.
    comp->release();

    {
        Mutexed<State>::Locked state(mState);
        state->set(RELEASED);
        state->comp.reset();
    }
    if (sendCallback) {
        mCallback->onReleaseCompleted();
    }
}
+
// Forwards the output surface to the buffer channel, which owns all surface
// handling.
status_t CCodec::setSurface(const sp<Surface> &surface) {
    return mChannel->setSurface(surface);
}
+
// Initiates a flush: RUNNING -> FLUSHING, stops the channel, and posts
// kWhatFlush. A flush while already FLUSHED completes immediately.
void CCodec::signalFlush() {
    status_t err = [this] {
        Mutexed<State>::Locked state(mState);
        if (state->get() == FLUSHED) {
            // Nothing to flush; treat as already done.
            return ALREADY_EXISTS;
        }
        if (state->get() != RUNNING) {
            return UNKNOWN_ERROR;
        }
        state->set(FLUSHING);
        return OK;
    }();
    switch (err) {
        case ALREADY_EXISTS:
            mCallback->onFlushCompleted();
            return;
        case OK:
            break;
        default:
            mCallback->onError(err, ACTION_CODE_FATAL);
            return;
    }

    mChannel->stop();
    (new AMessage(kWhatFlush, this))->post();
}
+
+void CCodec::flush() {
+ std::shared_ptr<Codec2Client::Component> comp;
+ auto checkFlushing = [this, &comp] {
+ Mutexed<State>::Locked state(mState);
+ if (state->get() != FLUSHING) {
+ return UNKNOWN_ERROR;
+ }
+ comp = state->comp;
+ return OK;
+ };
+ if (tryAndReportOnError(checkFlushing) != OK) {
+ return;
+ }
+
+ std::list<std::unique_ptr<C2Work>> flushedWork;
+ c2_status_t err = comp->flush(C2Component::FLUSH_COMPONENT, &flushedWork);
+ {
+ Mutexed<std::list<std::unique_ptr<C2Work>>>::Locked queue(mWorkDoneQueue);
+ flushedWork.splice(flushedWork.end(), *queue);
+ }
+ if (err != C2_OK) {
+ // TODO: convert err into status_t
+ mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ }
+
+ mChannel->flush(flushedWork);
+ subQueuedWorkCount(flushedWork.size());
+
+ {
+ Mutexed<State>::Locked state(mState);
+ state->set(FLUSHED);
+ }
+ mCallback->onFlushCompleted();
+}
+
// Resumes after a flush: FLUSHED -> RESUMING, restarts the buffer channel
// (keeping the existing formats), then RESUMING -> RUNNING and requests the
// initial input buffers again.
void CCodec::signalResume() {
    auto setResuming = [this] {
        Mutexed<State>::Locked state(mState);
        if (state->get() != FLUSHED) {
            return UNKNOWN_ERROR;
        }
        state->set(RESUMING);
        return OK;
    };
    if (tryAndReportOnError(setResuming) != OK) {
        return;
    }

    // null formats: the channel keeps its current input/output formats.
    (void)mChannel->start(nullptr, nullptr);

    {
        Mutexed<State>::Locked state(mState);
        if (state->get() != RESUMING) {
            state.unlock();
            mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
            state.lock();
            return;
        }
        state->set(RUNNING);
    }

    (void)mChannel->requestInitialInputBuffers();
}
+
+void CCodec::signalSetParameters(const sp<AMessage> ¶ms) {
+ sp<AMessage> msg = new AMessage(kWhatSetParameters, this);
+ msg->setMessage("params", params);
+ msg->post();
+}
+
// Applies runtime (post-configure) parameters on the looper thread
// (kWhatSetParameters): input-surface controls are pushed to the surface
// directly; everything else is converted to C2 params and routed either to
// the buffer channel (frame-synchronized) or straight to the component.
void CCodec::setParameters(const sp<AMessage> &params) {
    std::shared_ptr<Codec2Client::Component> comp;
    auto checkState = [this, &comp] {
        Mutexed<State>::Locked state(mState);
        if (state->get() == RELEASED) {
            return INVALID_OPERATION;
        }
        comp = state->comp;
        return OK;
    };
    if (tryAndReportOnError(checkState) != OK) {
        return;
    }

    Mutexed<Config>::Locked config(mConfig);

    /**
     * Handle input surface parameters
     */
    if ((config->mDomain & (Config::IS_VIDEO | Config::IS_IMAGE))
            && (config->mDomain & Config::IS_ENCODER) && config->mInputSurface && config->mISConfig) {
        (void)params->findInt64("time-offset-us", &config->mISConfig->mTimeOffsetUs);

        if (params->findInt64("skip-frames-before", &config->mISConfig->mStartAtUs)) {
            config->mISConfig->mStopped = false;
        } else if (params->findInt64("stop-time-us", &config->mISConfig->mStopAtUs)) {
            config->mISConfig->mStopped = true;
        }

        int32_t value;
        if (params->findInt32("drop-input-frames", &value)) {
            config->mISConfig->mSuspended = value;
            config->mISConfig->mSuspendAtUs = -1;
            (void)params->findInt64("drop-start-time-us", &config->mISConfig->mSuspendAtUs);
        }

        (void)config->mInputSurface->configure(*config->mISConfig);
        if (config->mISConfig->mStopped) {
            config->mInputFormat->setInt64(
                    "android._stop-time-offset-us", config->mISConfig->mInputDelayUs);
        }
    }

    std::vector<std::unique_ptr<C2Param>> configUpdate;
    (void)config->getConfigUpdateFromSdkParams(
            comp, params, Config::IS_PARAM, C2_MAY_BLOCK, &configUpdate);
    // Prefer to pass parameters to the buffer channel, so they can be synchronized with the frames.
    // Parameter synchronization is not defined when using input surface. For now, route
    // these directly to the component.
    if (config->mInputSurface == nullptr
            && (property_get_bool("debug.stagefright.ccodec_delayed_params", false)
                || comp->getName().find("c2.android.") == 0)) {
        mChannel->setParameters(configUpdate);
    } else {
        (void)config->setParameters(comp, configUpdate, C2_MAY_BLOCK);
    }
}
+
// Asks the buffer channel to queue EOS and reports the result to the client.
void CCodec::signalEndOfInputStream() {
    mCallback->onSignaledInputEOS(mChannel->signalEndOfInputStream());
}
+
// Requests a sync (IDR) frame from the component by pushing a
// C2StreamRequestSyncFrameTuning parameter; silently ignored when released.
void CCodec::signalRequestIDRFrame() {
    std::shared_ptr<Codec2Client::Component> comp;
    {
        Mutexed<State>::Locked state(mState);
        if (state->get() == RELEASED) {
            ALOGD("no IDR request sent since component is released");
            return;
        }
        comp = state->comp;
    }
    ALOGV("request IDR");
    Mutexed<Config>::Locked config(mConfig);
    std::vector<std::unique_ptr<C2Param>> params;
    params.push_back(
            std::make_unique<C2StreamRequestSyncFrameTuning::output>(0u, true));
    config->setParameters(comp, params, C2_MAY_BLOCK);
}
+
// Listener entry point: queues finished work items (and the discarded-input
// count, attributed to the last item of the batch) and posts kWhatWorkDone so
// they are processed one-by-one on the looper thread.
void CCodec::onWorkDone(std::list<std::unique_ptr<C2Work>> &workItems,
                        size_t numDiscardedInputBuffers) {
    if (!workItems.empty()) {
        {
            Mutexed<std::list<size_t>>::Locked numDiscardedInputBuffersQueue(
                    mNumDiscardedInputBuffersQueue);
            // Keep the two queues in lockstep: zero for all but the last item.
            numDiscardedInputBuffersQueue->insert(
                    numDiscardedInputBuffersQueue->end(),
                    workItems.size() - 1, 0);
            numDiscardedInputBuffersQueue->emplace_back(
                    numDiscardedInputBuffers);
        }
        {
            Mutexed<std::list<std::unique_ptr<C2Work>>>::Locked queue(mWorkDoneQueue);
            queue->splice(queue->end(), workItems);
        }
    }
    (new AMessage(kWhatWorkDone, this))->post();
}
+
// Listener entry point: forward a returned input buffer to the channel so it
// can be recycled.
void CCodec::onInputBufferDone(const std::shared_ptr<C2Buffer>& buffer) {
    mChannel->onInputBufferDone(buffer);
}
+
// Looper-thread dispatcher. Each case sets a watchdog deadline (roughly the
// spec'd maximum duration of the underlying C2 call, with headroom) before
// doing the work; the deadline is cleared at the end so a parked looper is
// never considered stuck.
void CCodec::onMessageReceived(const sp<AMessage> &msg) {
    TimePoint now = std::chrono::steady_clock::now();
    CCodecWatchdog::getInstance()->watch(this);
    switch (msg->what()) {
        case kWhatAllocate: {
            // C2ComponentStore::createComponent() should return within 100ms.
            setDeadline(now, 150ms, "allocate");
            sp<RefBase> obj;
            CHECK(msg->findObject("codecInfo", &obj));
            allocate((MediaCodecInfo *)obj.get());
            break;
        }
        case kWhatConfigure: {
            // C2Component::commit_sm() should return within 5ms.
            setDeadline(now, 250ms, "configure");
            sp<AMessage> format;
            CHECK(msg->findMessage("format", &format));
            configure(format);
            break;
        }
        case kWhatStart: {
            // C2Component::start() should return within 500ms.
            setDeadline(now, 550ms, "start");
            mQueuedWorkCount = 0;
            start();
            break;
        }
        case kWhatStop: {
            // C2Component::stop() should return within 500ms.
            setDeadline(now, 550ms, "stop");
            stop();

            // Stopping discards all queued work; reset the queue watchdog.
            mQueuedWorkCount = 0;
            Mutexed<NamedTimePoint>::Locked deadline(mQueueDeadline);
            deadline->set(TimePoint::max(), "none");
            break;
        }
        case kWhatFlush: {
            // C2Component::flush_sm() should return within 5ms.
            setDeadline(now, 50ms, "flush");
            flush();
            break;
        }
        case kWhatCreateInputSurface: {
            // Surface operations may be briefly blocking.
            setDeadline(now, 100ms, "createInputSurface");
            createInputSurface();
            break;
        }
        case kWhatSetInputSurface: {
            // Surface operations may be briefly blocking.
            setDeadline(now, 100ms, "setInputSurface");
            sp<RefBase> obj;
            CHECK(msg->findObject("surface", &obj));
            sp<PersistentSurface> surface(static_cast<PersistentSurface *>(obj.get()));
            setInputSurface(surface);
            break;
        }
        case kWhatSetParameters: {
            setDeadline(now, 50ms, "setParameters");
            sp<AMessage> params;
            CHECK(msg->findMessage("params", &params));
            setParameters(params);
            break;
        }
        case kWhatWorkDone: {
            // Process exactly one finished work item per message; re-post if
            // more remain so other messages can interleave.
            std::unique_ptr<C2Work> work;
            size_t numDiscardedInputBuffers;
            bool shouldPost = false;
            {
                Mutexed<std::list<std::unique_ptr<C2Work>>>::Locked queue(mWorkDoneQueue);
                if (queue->empty()) {
                    break;
                }
                work.swap(queue->front());
                queue->pop_front();
                shouldPost = !queue->empty();
            }
            {
                Mutexed<std::list<size_t>>::Locked numDiscardedInputBuffersQueue(
                        mNumDiscardedInputBuffersQueue);
                if (numDiscardedInputBuffersQueue->empty()) {
                    numDiscardedInputBuffers = 0;
                } else {
                    numDiscardedInputBuffers = numDiscardedInputBuffersQueue->front();
                    numDiscardedInputBuffersQueue->pop_front();
                }
            }
            if (shouldPost) {
                (new AMessage(kWhatWorkDone, this))->post();
            }

            // Only count a work item as done if its last worklet is complete.
            if (work->worklets.empty()
                    || !(work->worklets.front()->output.flags & C2FrameData::FLAG_INCOMPLETE)) {
                subQueuedWorkCount(1);
            }
            // handle configuration changes in work done
            Mutexed<Config>::Locked config(mConfig);
            bool changed = false;
            Config::Watcher<C2StreamInitDataInfo::output> initData =
                config->watch<C2StreamInitDataInfo::output>();
            if (!work->worklets.empty()
                    && (work->worklets.front()->output.flags
                            & C2FrameData::FLAG_DISCARD_FRAME) == 0) {

                // copy buffer info to config
                std::vector<std::unique_ptr<C2Param>> updates =
                    std::move(work->worklets.front()->output.configUpdate);
                unsigned stream = 0;
                for (const std::shared_ptr<C2Buffer> &buf : work->worklets.front()->output.buffers) {
                    for (const std::shared_ptr<const C2Info> &info : buf->info()) {
                        // move all info into output-stream #0 domain
                        updates.emplace_back(C2Param::CopyAsStream(*info, true /* output */, stream));
                    }
                    for (const C2ConstGraphicBlock &block : buf->data().graphicBlocks()) {
                        // ALOGV("got output buffer with crop %u,%u+%u,%u and size %u,%u",
                        //      block.crop().left, block.crop().top,
                        //      block.crop().width, block.crop().height,
                        //      block.width(), block.height());
                        updates.emplace_back(new C2StreamCropRectInfo::output(stream, block.crop()));
                        updates.emplace_back(new C2StreamPictureSizeInfo::output(
                                stream, block.width(), block.height()));
                        break; // for now only do the first block
                    }
                    ++stream;
                }

                changed = config->updateConfiguration(updates, config->mOutputDomain);

                // copy standard infos to graphic buffers if not already present (otherwise, we
                // may overwrite the actual intermediate value with a final value)
                stream = 0;
                const static std::vector<C2Param::Index> stdGfxInfos = {
                    C2StreamRotationInfo::output::PARAM_TYPE,
                    C2StreamColorAspectsInfo::output::PARAM_TYPE,
                    C2StreamDataSpaceInfo::output::PARAM_TYPE,
                    C2StreamHdrStaticInfo::output::PARAM_TYPE,
                    C2StreamPixelAspectRatioInfo::output::PARAM_TYPE,
                    C2StreamSurfaceScalingInfo::output::PARAM_TYPE
                };
                for (const std::shared_ptr<C2Buffer> &buf : work->worklets.front()->output.buffers) {
                    if (buf->data().graphicBlocks().size()) {
                        for (C2Param::Index ix : stdGfxInfos) {
                            if (!buf->hasInfo(ix)) {
                                const C2Param *param =
                                    config->getConfigParameterValue(ix.withStream(stream));
                                if (param) {
                                    std::shared_ptr<C2Param> info(C2Param::Copy(*param));
                                    buf->setInfo(std::static_pointer_cast<C2Info>(info));
                                }
                            }
                        }
                    }
                    ++stream;
                }
            }
            mChannel->onWorkDone(
                    std::move(work), changed ? config->mOutputFormat : nullptr,
                    initData.hasChanged() ? initData.update().get() : nullptr,
                    numDiscardedInputBuffers);
            break;
        }
        case kWhatWatch: {
            // watch message already posted; no-op.
            break;
        }
        default: {
            ALOGE("unrecognized message");
            break;
        }
    }
    setDeadline(TimePoint::max(), 0ms, "none");
}
+
// Arms the watchdog deadline for the operation named |name|. The timeout can
// be stretched via the debug.stagefright.ccodec_timeout_mult property (useful
// on slow/emulated devices).
void CCodec::setDeadline(
        const TimePoint &now,
        const std::chrono::milliseconds &timeout,
        const char *name) {
    int32_t mult = std::max(1, property_get_int32("debug.stagefright.ccodec_timeout_mult", 1));
    Mutexed<NamedTimePoint>::Locked deadline(mDeadline);
    deadline->set(now + (timeout * mult), name);
}
+
// Watchdog callback: if any tracked deadline (operation, work queue, EOS) has
// passed, forcibly release the codec and report a fatal error. Otherwise, if
// any deadline is still pending, schedule another watch.
void CCodec::initiateReleaseIfStuck() {
    std::string name;
    bool pendingDeadline = false;
    for (Mutexed<NamedTimePoint> *deadlinePtr : { &mDeadline, &mQueueDeadline, &mEosDeadline }) {
        Mutexed<NamedTimePoint>::Locked deadline(*deadlinePtr);
        if (deadline->get() < std::chrono::steady_clock::now()) {
            name = deadline->getName();
            break;
        }
        if (deadline->get() != TimePoint::max()) {
            pendingDeadline = true;
        }
    }
    if (name.empty()) {
        // We're not stuck.
        if (pendingDeadline) {
            // If we are not stuck yet but still has deadline coming up,
            // post watch message to check back later.
            (new AMessage(kWhatWatch, this))->post();
        }
        return;
    }

    ALOGW("previous call to %s exceeded timeout", name.c_str());
    // Release silently; the fatal error below is what the client sees.
    initiateRelease(false);
    mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
}
+
// Channel callback: a work item was queued to the component. Tracks the
// outstanding count and arms watchdog deadlines for EOS completion and for a
// component that stops consuming work.
void CCodec::onWorkQueued(bool eos) {
    ALOGV("queued work count +1 from %d", mQueuedWorkCount.load());
    int32_t count = ++mQueuedWorkCount;
    if (eos) {
        // EOS must be answered within 3 seconds.
        CCodecWatchdog::getInstance()->watch(this);
        Mutexed<NamedTimePoint>::Locked deadline(mEosDeadline);
        deadline->set(std::chrono::steady_clock::now() + 3s, "eos");
    }
    // TODO: query and use input/pipeline/output delay combined
    if (count >= 8) {
        // Too much outstanding work: expect progress within 3 seconds.
        CCodecWatchdog::getInstance()->watch(this);
        Mutexed<NamedTimePoint>::Locked deadline(mQueueDeadline);
        deadline->set(std::chrono::steady_clock::now() + 3s, "queue");
    }
}
+
// Channel/flush callback: |count| work items finished (or were flushed).
// Clears the EOS deadline once all work has drained and always clears the
// queue deadline, since progress was just made.
void CCodec::subQueuedWorkCount(uint32_t count) {
    ALOGV("queued work count -%u from %d", count, mQueuedWorkCount.load());
    int32_t currentCount = (mQueuedWorkCount -= count);
    if (currentCount == 0) {
        Mutexed<NamedTimePoint>::Locked deadline(mEosDeadline);
        deadline->set(TimePoint::max(), "none");
    }
    Mutexed<NamedTimePoint>::Locked deadline(mQueueDeadline);
    deadline->set(TimePoint::max(), "none");
}
+
}  // namespace android

// Entry point used by MediaCodec (via dlsym) to instantiate the Codec2-based
// CodecBase implementation.
extern "C" android::CodecBase *CreateCodec() {
    return new android::CCodec;
}
+
// Entry point used to create a persistent input surface for encoders: tries
// the Codec2 HIDL input surface first and falls back to the OMX
// createInputSurface() path; returns nullptr if both fail.
extern "C" android::PersistentSurface *CreateInputSurface() {
    // Attempt to create a Codec2's input surface.
    std::shared_ptr<android::Codec2Client::InputSurface> inputSurface =
            android::Codec2Client::CreateInputSurface();
    if (inputSurface) {
        return new android::PersistentSurface(
                inputSurface->getGraphicBufferProducer(),
                static_cast<android::sp<android::hidl::base::V1_0::IBase>>(
                inputSurface->getHalInterface()));
    }

    // Fall back to OMX.
    using namespace android::hardware::media::omx::V1_0;
    using namespace android::hardware::media::omx::V1_0::utils;
    using namespace android::hardware::graphics::bufferqueue::V1_0::utils;
    typedef android::hardware::media::omx::V1_0::Status OmxStatus;
    android::sp<IOmx> omx = IOmx::getService();
    typedef android::hardware::graphics::bufferqueue::V1_0::
            IGraphicBufferProducer HGraphicBufferProducer;
    typedef android::hardware::media::omx::V1_0::
            IGraphicBufferSource HGraphicBufferSource;
    OmxStatus s;
    android::sp<HGraphicBufferProducer> gbp;
    android::sp<HGraphicBufferSource> gbs;
    // Synchronous HIDL call; the lambda captures the returned producer/source.
    android::Return<void> transStatus = omx->createInputSurface(
            [&s, &gbp, &gbs](
                    OmxStatus status,
                    const android::sp<HGraphicBufferProducer>& producer,
                    const android::sp<HGraphicBufferSource>& source) {
                s = status;
                gbp = producer;
                gbs = source;
            });
    if (transStatus.isOk() && s == OmxStatus::OK) {
        return new android::PersistentSurface(
                new H2BGraphicBufferProducer(gbp),
                sp<::android::IGraphicBufferSource>(
                        new LWGraphicBufferSource(gbs)));
    }

    return nullptr;
}
+
diff --git a/media/codec2/sfplugin/CCodec.h b/media/codec2/sfplugin/CCodec.h
new file mode 100644
index 0000000..78b009e
--- /dev/null
+++ b/media/codec2/sfplugin/CCodec.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef C_CODEC_H_
+#define C_CODEC_H_
+
+#include <chrono>
+#include <list>
+#include <memory>
+#include <set>
+
+#include <C2Component.h>
+#include <codec2/hidl/client.h>
+
+#include <android/native_window.h>
+#include <media/hardware/MetadataBufferType.h>
+#include <media/stagefright/foundation/Mutexed.h>
+#include <media/stagefright/CodecBase.h>
+#include <media/stagefright/FrameRenderTracker.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/SkipCutBuffer.h>
+#include <utils/NativeHandle.h>
+#include <hardware/gralloc.h>
+#include <nativebase/nativebase.h>
+
+#include "CCodecConfig.h"
+
+namespace android {
+
+class CCodecBufferChannel;
+class InputSurfaceWrapper;
+struct MediaCodecInfo;
+
+/**
+ * CodecBase implementation backed by a Codec2 component reached through
+ * Codec2Client. MediaCodec-facing requests are posted as messages and
+ * executed on the looper thread; named deadlines are kept for pending
+ * operations so a stuck component can be detected.
+ */
+class CCodec : public CodecBase {
+public:
+ CCodec();
+
+ // CodecBase interface. Each initiate*/signal* call is posted to the
+ // looper and handled asynchronously in onMessageReceived().
+ virtual std::shared_ptr<BufferChannelBase> getBufferChannel() override;
+ virtual void initiateAllocateComponent(const sp<AMessage> &msg) override;
+ virtual void initiateConfigureComponent(const sp<AMessage> &msg) override;
+ virtual void initiateCreateInputSurface() override;
+ virtual void initiateSetInputSurface(const sp<PersistentSurface> &surface) override;
+ virtual void initiateStart() override;
+ virtual void initiateShutdown(bool keepComponentAllocated = false) override;
+
+ virtual status_t setSurface(const sp<Surface> &surface) override;
+
+ virtual void signalFlush() override;
+ virtual void signalResume() override;
+
+ virtual void signalSetParameters(const sp<AMessage> &params) override;
+ virtual void signalEndOfInputStream() override;
+ virtual void signalRequestIDRFrame() override;
+
+ // Initiates release when a pending operation appears stuck; presumably
+ // driven by the kWhatWatch message — confirm in the implementation.
+ void initiateReleaseIfStuck();
+ // Listener notification: work items finished in the component.
+ void onWorkDone(std::list<std::unique_ptr<C2Work>> &workItems,
+ size_t numDiscardedInputBuffers);
+ // Listener notification: the component is done with an input buffer.
+ void onInputBufferDone(const std::shared_ptr<C2Buffer>& buffer);
+
+protected:
+ virtual ~CCodec();
+
+ virtual void onMessageReceived(const sp<AMessage> &msg) override;
+
+private:
+ typedef std::chrono::time_point<std::chrono::steady_clock> TimePoint;
+
+ // Runs |job|; non-OK status is reported through the codec callback.
+ status_t tryAndReportOnError(std::function<status_t()> job);
+
+ void initiateStop();
+ void initiateRelease(bool sendCallback = true);
+
+ // Handlers executed on the looper thread for the corresponding messages.
+ void allocate(const sp<MediaCodecInfo> &codecInfo);
+ void configure(const sp<AMessage> &msg);
+ void start();
+ void stop();
+ void flush();
+ void release(bool sendCallback);
+
+ void createInputSurface();
+ void setInputSurface(const sp<PersistentSurface> &surface);
+ status_t setupInputSurface(const std::shared_ptr<InputSurfaceWrapper> &surface);
+ void setParameters(const sp<AMessage> &params);
+
+ // Arms mDeadline: the operation |name| must finish within |timeout| of
+ // |now|.
+ void setDeadline(
+ const TimePoint &now,
+ const std::chrono::milliseconds &timeout,
+ const char *name);
+
+ void onWorkQueued(bool eos);
+ void subQueuedWorkCount(uint32_t count);
+
+ enum {
+ kWhatAllocate,
+ kWhatConfigure,
+ kWhatStart,
+ kWhatFlush,
+ kWhatStop,
+ kWhatRelease,
+ kWhatCreateInputSurface,
+ kWhatSetInputSurface,
+ kWhatSetParameters,
+
+ kWhatWorkDone,
+ kWhatWatch,
+ };
+
+ // Component state machine; the transient states document the transition
+ // they are part of.
+ enum {
+ RELEASED,
+ ALLOCATED,
+ FLUSHED,
+ RUNNING,
+
+ ALLOCATING, // RELEASED -> ALLOCATED
+ STARTING, // ALLOCATED -> RUNNING
+ STOPPING, // RUNNING -> ALLOCATED
+ FLUSHING, // RUNNING -> FLUSHED
+ RESUMING, // FLUSHED -> RUNNING
+ RELEASING, // {ANY EXCEPT RELEASED} -> RELEASED
+ };
+
+ // State value plus the component handle it applies to; both are accessed
+ // under the Mutexed<State> lock.
+ struct State {
+ inline State() : mState(RELEASED) {}
+ inline int get() const { return mState; }
+ inline void set(int newState) { mState = newState; }
+
+ std::shared_ptr<Codec2Client::Component> comp;
+ private:
+ int mState;
+ };
+
+ // A deadline paired with the name of the pending operation (for logging
+ // when missed). TimePoint::max() means "no deadline armed".
+ struct NamedTimePoint {
+ NamedTimePoint() : mTimePoint(TimePoint::max()), mName("") {}
+
+ inline void set(
+ const TimePoint &timePoint,
+ const char *name) {
+ mTimePoint = timePoint;
+ mName = name;
+ }
+
+ inline TimePoint get() const { return mTimePoint; }
+ inline const char *getName() const { return mName; }
+ private:
+ TimePoint mTimePoint;
+ const char *mName; // expected to point at a string literal
+ };
+
+ Mutexed<State> mState;
+ std::shared_ptr<CCodecBufferChannel> mChannel;
+
+ std::shared_ptr<Codec2Client> mClient;
+ std::shared_ptr<Codec2Client::Listener> mClientListener;
+ struct ClientListener;
+
+ // Deadline for the currently pending state transition.
+ Mutexed<NamedTimePoint> mDeadline;
+ // Count of work items queued to the component and not yet done.
+ std::atomic_int32_t mQueuedWorkCount;
+ // Deadlines for queued work / end-of-stream; reset to TimePoint::max()
+ // ("none") when the pipeline is idle.
+ Mutexed<NamedTimePoint> mQueueDeadline;
+ Mutexed<NamedTimePoint> mEosDeadline;
+ typedef CCodecConfig Config;
+ Mutexed<Config> mConfig;
+ Mutexed<std::list<std::unique_ptr<C2Work>>> mWorkDoneQueue;
+ Mutexed<std::list<size_t>> mNumDiscardedInputBuffersQueue;
+
+ friend class CCodecCallbackImpl;
+
+ DISALLOW_EVIL_CONSTRUCTORS(CCodec);
+};
+
+} // namespace android
+
+#endif // C_CODEC_H_
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
new file mode 100644
index 0000000..01b9c1e
--- /dev/null
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -0,0 +1,2791 @@
+/*
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "CCodecBufferChannel"
+#include <utils/Log.h>
+
+#include <numeric>
+
+#include <C2AllocatorGralloc.h>
+#include <C2PlatformSupport.h>
+#include <C2BlockInternal.h>
+#include <C2Config.h>
+#include <C2Debug.h>
+
+#include <android/hardware/cas/native/1.0/IDescrambler.h>
+#include <android-base/stringprintf.h>
+#include <binder/MemoryDealer.h>
+#include <gui/Surface.h>
+#include <media/openmax/OMX_Core.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ALookup.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaCodecConstants.h>
+#include <media/MediaCodecBuffer.h>
+#include <system/window.h>
+
+#include "CCodecBufferChannel.h"
+#include "Codec2Buffer.h"
+#include "SkipCutBuffer.h"
+
+namespace android {
+
+using android::base::StringPrintf;
+using hardware::hidl_handle;
+using hardware::hidl_string;
+using hardware::hidl_vec;
+using namespace hardware::cas::V1_0;
+using namespace hardware::cas::native::V1_0;
+
+using CasStatus = hardware::cas::V1_0::Status;
+
+/**
+ * Base class for representation of buffers at one port.
+ */
+class CCodecBufferChannel::Buffers {
+public:
+ Buffers(const char *componentName, const char *name = "Buffers")
+ : mComponentName(componentName),
+ mChannelName(std::string(componentName) + ":" + name),
+ mName(mChannelName.c_str()) {
+ }
+ virtual ~Buffers() = default;
+
+ /**
+ * Set format for MediaCodec-facing buffers.
+ */
+ void setFormat(const sp<AMessage> &format) {
+ CHECK(format != nullptr);
+ mFormat = format;
+ }
+
+ /**
+ * Return a copy of current format.
+ */
+ sp<AMessage> dupFormat() {
+ return mFormat != nullptr ? mFormat->dup() : nullptr;
+ }
+
+ /**
+ * Returns true if the buffers are operating under array mode.
+ */
+ virtual bool isArrayMode() const { return false; }
+
+ /**
+ * Fills the vector with MediaCodecBuffer's if in array mode; otherwise,
+ * no-op.
+ */
+ virtual void getArray(Vector<sp<MediaCodecBuffer>> *) const {}
+
+protected:
+ std::string mComponentName; ///< name of component for debugging
+ std::string mChannelName; ///< name of channel for debugging
+ // Points into mChannelName's storage, so it is valid only for this
+ // object's lifetime; mChannelName is not modified after construction.
+ const char *mName; ///< C-string version of channel name
+ // Format to be used for creating MediaCodec-facing buffers.
+ sp<AMessage> mFormat;
+
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(Buffers);
+};
+
+/**
+ * Base class for input-port buffers. Adds input-specific operations on top
+ * of Buffers (obtaining new client buffers, returning them as C2Buffers,
+ * flushing) plus the block pool used to allocate input memory.
+ */
+class CCodecBufferChannel::InputBuffers : public CCodecBufferChannel::Buffers {
+public:
+ InputBuffers(const char *componentName, const char *name = "Input[]")
+ : Buffers(componentName, name) { }
+ virtual ~InputBuffers() = default;
+
+ /**
+ * Set a block pool to obtain input memory blocks.
+ */
+ void setPool(const std::shared_ptr<C2BlockPool> &pool) { mPool = pool; }
+
+ /**
+ * Get a new MediaCodecBuffer for input and its corresponding index.
+ * Returns false if no new buffer can be obtained at the moment.
+ */
+ virtual bool requestNewBuffer(size_t *index, sp<MediaCodecBuffer> *buffer) = 0;
+
+ /**
+ * Release the buffer obtained from requestNewBuffer() and get the
+ * associated C2Buffer object back. Returns true if the buffer was on file
+ * and released successfully.
+ */
+ virtual bool releaseBuffer(
+ const sp<MediaCodecBuffer> &buffer, std::shared_ptr<C2Buffer> *c2buffer) = 0;
+
+ /**
+ * Release the buffer that is no longer used by the codec process. Return
+ * true if and only if the buffer was on file and released successfully.
+ */
+ virtual bool expireComponentBuffer(
+ const std::shared_ptr<C2Buffer> &c2buffer) = 0;
+
+ /**
+ * Flush internal state. After this call, no index or buffer previously
+ * returned from requestNewBuffer() is valid.
+ */
+ virtual void flush() = 0;
+
+ /**
+ * Return array-backed version of input buffers. The returned object
+ * shall retain the internal state so that it will honor index and
+ * buffer from previous calls of requestNewBuffer().
+ */
+ virtual std::unique_ptr<InputBuffers> toArrayMode(size_t size) = 0;
+
+protected:
+ // Pool to obtain blocks for input buffers.
+ std::shared_ptr<C2BlockPool> mPool;
+
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(InputBuffers);
+};
+
+/**
+ * Base class for output-port buffers. Adds registration of component output
+ * (and CSD) as client-facing buffers, plus optional SkipCutBuffer handling
+ * for audio delay/padding removal.
+ */
+class CCodecBufferChannel::OutputBuffers : public CCodecBufferChannel::Buffers {
+public:
+ OutputBuffers(const char *componentName, const char *name = "Output")
+ : Buffers(componentName, name) { }
+ virtual ~OutputBuffers() = default;
+
+ /**
+ * Register output C2Buffer from the component and obtain corresponding
+ * index and MediaCodecBuffer object. Returns false if registration
+ * fails.
+ */
+ virtual status_t registerBuffer(
+ const std::shared_ptr<C2Buffer> &buffer,
+ size_t *index,
+ sp<MediaCodecBuffer> *clientBuffer) = 0;
+
+ /**
+ * Register codec specific data as a buffer to be consistent with
+ * MediaCodec behavior.
+ */
+ virtual status_t registerCsd(
+ const C2StreamCsdInfo::output * /* csd */,
+ size_t * /* index */,
+ sp<MediaCodecBuffer> * /* clientBuffer */) = 0;
+
+ /**
+ * Release the buffer obtained from registerBuffer() and get the
+ * associated C2Buffer object back. Returns true if the buffer was on file
+ * and released successfully.
+ */
+ virtual bool releaseBuffer(
+ const sp<MediaCodecBuffer> &buffer, std::shared_ptr<C2Buffer> *c2buffer) = 0;
+
+ /**
+ * Flush internal state. After this call, no index or buffer previously
+ * returned from registerBuffer() is valid.
+ */
+ virtual void flush(const std::list<std::unique_ptr<C2Work>> &flushedWork) = 0;
+
+ /**
+ * Return array-backed version of output buffers. The returned object
+ * shall retain the internal state so that it will honor index and
+ * buffer from previous calls of registerBuffer().
+ */
+ virtual std::unique_ptr<OutputBuffers> toArrayMode(size_t size) = 0;
+
+ /**
+ * Initialize SkipCutBuffer object.
+ */
+ void initSkipCutBuffer(
+ int32_t delay, int32_t padding, int32_t sampleRate, int32_t channelCount) {
+ CHECK(mSkipCutBuffer == nullptr);
+ // Remember the configured values at the initial sample rate; they are
+ // the basis for rescaling in updateSkipCutBuffer().
+ mDelay = delay;
+ mPadding = padding;
+ mSampleRate = sampleRate;
+ setSkipCutBuffer(delay, padding, channelCount);
+ }
+
+ /**
+ * Update the SkipCutBuffer object. No-op if it's never initialized.
+ */
+ void updateSkipCutBuffer(int32_t sampleRate, int32_t channelCount) {
+ if (mSkipCutBuffer == nullptr) {
+ return;
+ }
+ int32_t delay = mDelay;
+ int32_t padding = mPadding;
+ if (sampleRate != mSampleRate) {
+ // Rescale the original delay/padding (given at mSampleRate) to the
+ // new sample rate; 64-bit intermediate avoids overflow.
+ delay = ((int64_t)delay * sampleRate) / mSampleRate;
+ padding = ((int64_t)padding * sampleRate) / mSampleRate;
+ }
+ setSkipCutBuffer(delay, padding, channelCount);
+ }
+
+ /**
+ * Submit buffer to SkipCutBuffer object, if initialized.
+ */
+ void submit(const sp<MediaCodecBuffer> &buffer) {
+ if (mSkipCutBuffer != nullptr) {
+ mSkipCutBuffer->submit(buffer);
+ }
+ }
+
+ /**
+ * Transfer SkipCutBuffer object to the other Buffers object.
+ */
+ void transferSkipCutBuffer(const sp<SkipCutBuffer> &scb) {
+ mSkipCutBuffer = scb;
+ }
+
+protected:
+ sp<SkipCutBuffer> mSkipCutBuffer;
+
+private:
+ // Values supplied to initSkipCutBuffer(), kept at the original sample
+ // rate as the basis for later rescaling.
+ int32_t mDelay;
+ int32_t mPadding;
+ int32_t mSampleRate;
+
+ // Replaces any existing SkipCutBuffer; bytes held by the old one are
+ // dropped (logged above when nonzero).
+ void setSkipCutBuffer(int32_t skip, int32_t cut, int32_t channelCount) {
+ if (mSkipCutBuffer != nullptr) {
+ size_t prevSize = mSkipCutBuffer->size();
+ if (prevSize != 0u) {
+ ALOGD("[%s] Replacing SkipCutBuffer holding %zu bytes", mName, prevSize);
+ }
+ }
+ mSkipCutBuffer = new SkipCutBuffer(skip, cut, channelCount);
+ }
+
+ DISALLOW_EVIL_CONSTRUCTORS(OutputBuffers);
+};
+
+namespace {
+
+// TODO: get this info from component
+const static size_t kMinInputBufferArraySize = 4;
+// Max number of buffers in flight through the pipeline at once; sizes the
+// output buffer array together with the channel output delay.
+const static size_t kMaxPipelineCapacity = 18;
+const static size_t kChannelOutputDelay = 0;
+const static size_t kMinOutputBufferArraySize = kMaxPipelineCapacity +
+ kChannelOutputDelay;
+// Default linear input buffer capacity (1 MiB) when the format does not
+// specify a max input size.
+const static size_t kLinearBufferSize = 1048576;
+// This can fit 4K RGBA frame, and most likely client won't need more than this.
+const static size_t kMaxLinearBufferSize = 3840 * 2160 * 4;
+
+/**
+ * Simple local buffer pool backed by std::vector.
+ */
+class LocalBufferPool : public std::enable_shared_from_this<LocalBufferPool> {
+public:
+ /**
+ * Create a new LocalBufferPool object.
+ *
+ * \param poolCapacity max total size of buffers managed by this pool.
+ *
+ * \return a newly created pool object.
+ */
+ static std::shared_ptr<LocalBufferPool> Create(size_t poolCapacity) {
+ return std::shared_ptr<LocalBufferPool>(new LocalBufferPool(poolCapacity));
+ }
+
+ /**
+ * Return an ABuffer object whose size is at least |capacity|.
+ *
+ * \param capacity requested capacity
+ * \return nullptr if the pool capacity is reached
+ * an ABuffer object otherwise.
+ */
+ sp<ABuffer> newBuffer(size_t capacity) {
+ Mutex::Autolock lock(mMutex);
+ // Reuse an idle vector that is already large enough, if any.
+ auto it = std::find_if(
+ mPool.begin(), mPool.end(),
+ [capacity](const std::vector<uint8_t> &vec) {
+ return vec.capacity() >= capacity;
+ });
+ if (it != mPool.end()) {
+ sp<ABuffer> buffer = new VectorBuffer(std::move(*it), shared_from_this());
+ mPool.erase(it);
+ return buffer;
+ }
+ if (mUsedSize + capacity > mPoolCapacity) {
+ // Over budget: evict idle vectors to make room; give up if the
+ // request still does not fit.
+ while (!mPool.empty()) {
+ mUsedSize -= mPool.back().capacity();
+ mPool.pop_back();
+ }
+ if (mUsedSize + capacity > mPoolCapacity) {
+ ALOGD("mUsedSize = %zu, capacity = %zu, mPoolCapacity = %zu",
+ mUsedSize, capacity, mPoolCapacity);
+ return nullptr;
+ }
+ }
+ std::vector<uint8_t> vec(capacity);
+ mUsedSize += vec.capacity();
+ return new VectorBuffer(std::move(vec), shared_from_this());
+ }
+
+private:
+ /**
+ * ABuffer backed by std::vector.
+ */
+ class VectorBuffer : public ::android::ABuffer {
+ public:
+ /**
+ * Construct a VectorBuffer by taking the ownership of supplied vector.
+ *
+ * \param vec backing vector of the buffer. this object takes
+ * ownership at construction.
+ * \param pool a LocalBufferPool object to return the vector at
+ * destruction.
+ */
+ // Note: the ABuffer base is initialized with vec's storage before the
+ // move; moving a vector transfers the same heap allocation into mVec,
+ // so the data pointer remains valid.
+ VectorBuffer(std::vector<uint8_t> &&vec, const std::shared_ptr<LocalBufferPool> &pool)
+ : ABuffer(vec.data(), vec.capacity()),
+ mVec(std::move(vec)),
+ mPool(pool) {
+ }
+
+ ~VectorBuffer() override {
+ std::shared_ptr<LocalBufferPool> pool = mPool.lock();
+ if (pool) {
+ // If pool is alive, return the vector back to the pool so that
+ // it can be recycled.
+ pool->returnVector(std::move(mVec));
+ }
+ }
+
+ private:
+ std::vector<uint8_t> mVec;
+ std::weak_ptr<LocalBufferPool> mPool; // non-owning; pool may die first
+ };
+
+ Mutex mMutex; // guards mPool and mUsedSize
+ size_t mPoolCapacity;
+ // Total capacity of all vectors created and not yet evicted, whether idle
+ // in mPool or currently checked out inside a VectorBuffer.
+ size_t mUsedSize;
+ std::list<std::vector<uint8_t>> mPool;
+
+ /**
+ * Private constructor to prevent constructing non-managed LocalBufferPool.
+ */
+ explicit LocalBufferPool(size_t poolCapacity)
+ : mPoolCapacity(poolCapacity), mUsedSize(0) {
+ }
+
+ /**
+ * Take back the ownership of vec from the destructed VectorBuffer and put
+ * it in front of the pool.
+ */
+ void returnVector(std::vector<uint8_t> &&vec) {
+ Mutex::Autolock lock(mMutex);
+ mPool.push_front(std::move(vec));
+ }
+
+ DISALLOW_EVIL_CONSTRUCTORS(LocalBufferPool);
+};
+
+// Fetch a graphic block of the dimensions given by |format|'s "width"/"height"
+// entries from |pool| and wrap it in a GraphicBlockBuffer whose auxiliary
+// ABuffer allocations come from |localBufferPool|. Returns nullptr if the
+// format lacks dimensions or the block fetch fails.
+sp<GraphicBlockBuffer> AllocateGraphicBuffer(
+ const std::shared_ptr<C2BlockPool> &pool,
+ const sp<AMessage> &format,
+ uint32_t pixelFormat,
+ const C2MemoryUsage &usage,
+ const std::shared_ptr<LocalBufferPool> &localBufferPool) {
+ int32_t width, height;
+ if (!format->findInt32("width", &width) || !format->findInt32("height", &height)) {
+ ALOGD("format lacks width or height");
+ return nullptr;
+ }
+
+ std::shared_ptr<C2GraphicBlock> block;
+ c2_status_t err = pool->fetchGraphicBlock(
+ width, height, pixelFormat, usage, &block);
+ if (err != C2_OK) {
+ ALOGD("fetch graphic block failed: %d", err);
+ return nullptr;
+ }
+
+ return GraphicBlockBuffer::Allocate(
+ format,
+ block,
+ [localBufferPool](size_t capacity) {
+ return localBufferPool->newBuffer(capacity);
+ });
+}
+
+class BuffersArrayImpl;
+
+/**
+ * Flexible buffer slots implementation.
+ *
+ * Slots grow on demand. A slot is free only when both the client buffer is
+ * gone AND the component-side C2Buffer (tracked by weak_ptr) has expired.
+ */
+class FlexBuffersImpl {
+public:
+ FlexBuffersImpl(const char *name)
+ : mImplName(std::string(name) + ".Impl"),
+ mName(mImplName.c_str()) { }
+
+ /**
+ * Assign an empty slot for a buffer and return the index. If there's no
+ * empty slot, just add one at the end and return it.
+ *
+ * \param buffer[in] a new buffer to assign a slot.
+ * \return index of the assigned slot.
+ */
+ size_t assignSlot(const sp<Codec2Buffer> &buffer) {
+ for (size_t i = 0; i < mBuffers.size(); ++i) {
+ if (mBuffers[i].clientBuffer == nullptr
+ && mBuffers[i].compBuffer.expired()) {
+ mBuffers[i].clientBuffer = buffer;
+ return i;
+ }
+ }
+ mBuffers.push_back({ buffer, std::weak_ptr<C2Buffer>() });
+ return mBuffers.size() - 1;
+ }
+
+ /**
+ * Release the slot from the client, and get the C2Buffer object back from
+ * the previously assigned buffer. Note that the slot is not completely free
+ * until the returned C2Buffer object is freed.
+ *
+ * \param buffer[in] the buffer previously assigned a slot.
+ * \param c2buffer[in,out] pointer to C2Buffer to be populated. Ignored
+ * if null.
+ * \return true if the buffer is successfully released from a slot
+ * false otherwise
+ */
+ bool releaseSlot(const sp<MediaCodecBuffer> &buffer, std::shared_ptr<C2Buffer> *c2buffer) {
+ sp<Codec2Buffer> clientBuffer;
+ size_t index = mBuffers.size();
+ for (size_t i = 0; i < mBuffers.size(); ++i) {
+ if (mBuffers[i].clientBuffer == buffer) {
+ clientBuffer = mBuffers[i].clientBuffer;
+ mBuffers[i].clientBuffer.clear();
+ index = i;
+ break;
+ }
+ }
+ if (clientBuffer == nullptr) {
+ ALOGV("[%s] %s: No matching buffer found", mName, __func__);
+ return false;
+ }
+ std::shared_ptr<C2Buffer> result = clientBuffer->asC2Buffer();
+ // Track the handed-out C2Buffer weakly; the slot becomes reusable only
+ // once that buffer is destroyed (see expireComponentBuffer()).
+ mBuffers[index].compBuffer = result;
+ if (c2buffer) {
+ *c2buffer = result;
+ }
+ return true;
+ }
+
+ // Mark the slot holding |c2buffer| as no longer in use by the component.
+ bool expireComponentBuffer(const std::shared_ptr<C2Buffer> &c2buffer) {
+ for (size_t i = 0; i < mBuffers.size(); ++i) {
+ std::shared_ptr<C2Buffer> compBuffer =
+ mBuffers[i].compBuffer.lock();
+ if (!compBuffer || compBuffer != c2buffer) {
+ continue;
+ }
+ mBuffers[i].clientBuffer = nullptr;
+ mBuffers[i].compBuffer.reset();
+ return true;
+ }
+ ALOGV("[%s] codec released an unknown buffer", mName);
+ return false;
+ }
+
+ void flush() {
+ ALOGV("[%s] buffers are flushed %zu", mName, mBuffers.size());
+ mBuffers.clear();
+ }
+
+private:
+ friend class BuffersArrayImpl;
+
+ std::string mImplName; ///< name for debugging
+ const char *mName; ///< C-string version of name
+
+ struct Entry {
+ sp<Codec2Buffer> clientBuffer; ///< buffer handed to the client, if any
+ std::weak_ptr<C2Buffer> compBuffer; ///< buffer handed to the component
+ };
+ std::vector<Entry> mBuffers;
+};
+
+/**
+ * Static buffer slots implementation based on a fixed-size array.
+ *
+ * Unlike FlexBuffersImpl, slots are pre-populated with client buffers and
+ * each entry additionally tracks whether the client currently owns it.
+ */
+class BuffersArrayImpl {
+public:
+ BuffersArrayImpl()
+ : mImplName("BuffersArrayImpl"),
+ mName(mImplName.c_str()) { }
+
+ /**
+ * Initialize buffer array from the original |impl|. The buffers known by
+ * the client is preserved, and the empty slots are populated so that the
+ * array size is at least |minSize|.
+ *
+ * \param impl[in] FlexBuffersImpl object used so far.
+ * \param minSize[in] minimum size of the buffer array.
+ * \param allocate[in] function to allocate a client buffer for an empty slot.
+ */
+ void initialize(
+ const FlexBuffersImpl &impl,
+ size_t minSize,
+ std::function<sp<Codec2Buffer>()> allocate) {
+ mImplName = impl.mImplName + "[N]";
+ mName = mImplName.c_str();
+ for (size_t i = 0; i < impl.mBuffers.size(); ++i) {
+ sp<Codec2Buffer> clientBuffer = impl.mBuffers[i].clientBuffer;
+ // A non-null client buffer in the flexible impl means the client
+ // still holds it; carry that ownership over.
+ bool ownedByClient = (clientBuffer != nullptr);
+ if (!ownedByClient) {
+ clientBuffer = allocate();
+ }
+ mBuffers.push_back({ clientBuffer, impl.mBuffers[i].compBuffer, ownedByClient });
+ }
+ ALOGV("[%s] converted %zu buffers to array mode of %zu", mName, mBuffers.size(), minSize);
+ for (size_t i = impl.mBuffers.size(); i < minSize; ++i) {
+ mBuffers.push_back({ allocate(), std::weak_ptr<C2Buffer>(), false });
+ }
+ }
+
+ /**
+ * Grab a buffer from the underlying array which matches the criteria.
+ *
+ * \param index[out] index of the slot.
+ * \param buffer[out] the matching buffer.
+ * \param match[in] a function to test whether the buffer matches the
+ * criteria or not.
+ * \return OK if successful,
+ * WOULD_BLOCK if slots are being used,
+ * NO_MEMORY if no slot matches the criteria, even though it's
+ * available
+ */
+ status_t grabBuffer(
+ size_t *index,
+ sp<Codec2Buffer> *buffer,
+ std::function<bool(const sp<Codec2Buffer> &)> match =
+ [](const sp<Codec2Buffer> &) { return true; }) {
+ // allBuffersDontMatch remains true if all buffers are available but
+ // match() returns false for every buffer.
+ bool allBuffersDontMatch = true;
+ for (size_t i = 0; i < mBuffers.size(); ++i) {
+ if (!mBuffers[i].ownedByClient && mBuffers[i].compBuffer.expired()) {
+ if (match(mBuffers[i].clientBuffer)) {
+ mBuffers[i].ownedByClient = true;
+ *buffer = mBuffers[i].clientBuffer;
+ // Hand out a clean buffer: reset metadata and range.
+ (*buffer)->meta()->clear();
+ (*buffer)->setRange(0, (*buffer)->capacity());
+ *index = i;
+ return OK;
+ }
+ } else {
+ allBuffersDontMatch = false;
+ }
+ }
+ return allBuffersDontMatch ? NO_MEMORY : WOULD_BLOCK;
+ }
+
+ /**
+ * Return the buffer from the client, and get the C2Buffer object back from
+ * the buffer. Note that the slot is not completely free until the returned
+ * C2Buffer object is freed.
+ *
+ * \param buffer[in] the buffer previously grabbed.
+ * \param c2buffer[in,out] pointer to C2Buffer to be populated. Ignored
+ * if null.
+ * \return true if the buffer is successfully returned
+ * false otherwise
+ */
+ bool returnBuffer(const sp<MediaCodecBuffer> &buffer, std::shared_ptr<C2Buffer> *c2buffer) {
+ sp<Codec2Buffer> clientBuffer;
+ size_t index = mBuffers.size();
+ for (size_t i = 0; i < mBuffers.size(); ++i) {
+ if (mBuffers[i].clientBuffer == buffer) {
+ if (!mBuffers[i].ownedByClient) {
+ // Bookkeeping mismatch; log but proceed with the return.
+ ALOGD("[%s] Client returned a buffer it does not own according to our record: %zu", mName, i);
+ }
+ clientBuffer = mBuffers[i].clientBuffer;
+ mBuffers[i].ownedByClient = false;
+ index = i;
+ break;
+ }
+ }
+ if (clientBuffer == nullptr) {
+ ALOGV("[%s] %s: No matching buffer found", mName, __func__);
+ return false;
+ }
+ ALOGV("[%s] %s: matching buffer found (index=%zu)", mName, __func__, index);
+ std::shared_ptr<C2Buffer> result = clientBuffer->asC2Buffer();
+ // The slot stays busy until this C2Buffer expires.
+ mBuffers[index].compBuffer = result;
+ if (c2buffer) {
+ *c2buffer = result;
+ }
+ return true;
+ }
+
+ // Mark the slot holding |c2buffer| as no longer in use by the component.
+ bool expireComponentBuffer(const std::shared_ptr<C2Buffer> &c2buffer) {
+ for (size_t i = 0; i < mBuffers.size(); ++i) {
+ std::shared_ptr<C2Buffer> compBuffer =
+ mBuffers[i].compBuffer.lock();
+ if (!compBuffer) {
+ continue;
+ }
+ if (c2buffer == compBuffer) {
+ if (mBuffers[i].ownedByClient) {
+ // This should not happen.
+ ALOGD("[%s] codec released a buffer owned by client "
+ "(index %zu)", mName, i);
+ mBuffers[i].ownedByClient = false;
+ }
+ mBuffers[i].compBuffer.reset();
+ return true;
+ }
+ }
+ ALOGV("[%s] codec released an unknown buffer (array mode)", mName);
+ return false;
+ }
+
+ /**
+ * Populate |array| with the underlying buffer array.
+ *
+ * \param array[out] an array to be filled with the underlying buffer array.
+ */
+ void getArray(Vector<sp<MediaCodecBuffer>> *array) const {
+ array->clear();
+ for (const Entry &entry : mBuffers) {
+ array->push(entry.clientBuffer);
+ }
+ }
+
+ /**
+ * The client abandoned all known buffers, so reclaim the ownership.
+ */
+ void flush() {
+ for (Entry &entry : mBuffers) {
+ entry.ownedByClient = false;
+ }
+ }
+
+ // Rebuild every slot with a freshly allocated client buffer, keeping the
+ // array size but dropping all component-buffer tracking.
+ void realloc(std::function<sp<Codec2Buffer>()> alloc) {
+ size_t size = mBuffers.size();
+ mBuffers.clear();
+ for (size_t i = 0; i < size; ++i) {
+ mBuffers.push_back({ alloc(), std::weak_ptr<C2Buffer>(), false });
+ }
+ }
+
+private:
+ std::string mImplName; ///< name for debugging
+ const char *mName; ///< C-string version of name
+
+ struct Entry {
+ const sp<Codec2Buffer> clientBuffer; ///< fixed for the entry's lifetime
+ std::weak_ptr<C2Buffer> compBuffer; ///< buffer handed to the component
+ bool ownedByClient; ///< true while the client holds the buffer
+ };
+ std::vector<Entry> mBuffers;
+};
+
+/**
+ * Array-mode input buffers: a fixed set of pre-allocated client buffers
+ * (see initialize()), handed out by index via BuffersArrayImpl.
+ */
+class InputBuffersArray : public CCodecBufferChannel::InputBuffers {
+public:
+ InputBuffersArray(const char *componentName, const char *name = "Input[N]")
+ : InputBuffers(componentName, name) { }
+ ~InputBuffersArray() override = default;
+
+ // Populate the array from the flexible impl, allocating empty slots with
+ // |allocate| until there are at least |minSize| entries.
+ void initialize(
+ const FlexBuffersImpl &impl,
+ size_t minSize,
+ std::function<sp<Codec2Buffer>()> allocate) {
+ mImpl.initialize(impl, minSize, allocate);
+ }
+
+ bool isArrayMode() const final { return true; }
+
+ // Already in array mode; conversion is not applicable.
+ std::unique_ptr<CCodecBufferChannel::InputBuffers> toArrayMode(
+ size_t) final {
+ return nullptr;
+ }
+
+ void getArray(Vector<sp<MediaCodecBuffer>> *array) const final {
+ mImpl.getArray(array);
+ }
+
+ bool requestNewBuffer(size_t *index, sp<MediaCodecBuffer> *buffer) override {
+ sp<Codec2Buffer> c2Buffer;
+ status_t err = mImpl.grabBuffer(index, &c2Buffer);
+ if (err == OK) {
+ c2Buffer->setFormat(mFormat);
+ *buffer = c2Buffer;
+ return true;
+ }
+ return false;
+ }
+
+ bool releaseBuffer(
+ const sp<MediaCodecBuffer> &buffer, std::shared_ptr<C2Buffer> *c2buffer) override {
+ return mImpl.returnBuffer(buffer, c2buffer);
+ }
+
+ bool expireComponentBuffer(
+ const std::shared_ptr<C2Buffer> &c2buffer) override {
+ return mImpl.expireComponentBuffer(c2buffer);
+ }
+
+ void flush() override {
+ mImpl.flush();
+ }
+
+private:
+ BuffersArrayImpl mImpl;
+};
+
+/**
+ * Non-array-mode linear (1D) input buffers. Each requested buffer is backed
+ * by a freshly fetched linear block; slots are tracked by FlexBuffersImpl.
+ */
+class LinearInputBuffers : public CCodecBufferChannel::InputBuffers {
+public:
+ LinearInputBuffers(const char *componentName, const char *name = "1D-Input")
+ : InputBuffers(componentName, name),
+ mImpl(mName) { }
+
+ bool requestNewBuffer(size_t *index, sp<MediaCodecBuffer> *buffer) override {
+ // Use the format's max input size if present, capped to a sane limit.
+ int32_t capacity = kLinearBufferSize;
+ (void)mFormat->findInt32(KEY_MAX_INPUT_SIZE, &capacity);
+ if ((size_t)capacity > kMaxLinearBufferSize) {
+ ALOGD("client requested %d, capped to %zu", capacity, kMaxLinearBufferSize);
+ capacity = kMaxLinearBufferSize;
+ }
+ // TODO: proper max input size
+ // TODO: read usage from intf
+ sp<Codec2Buffer> newBuffer = alloc((size_t)capacity);
+ if (newBuffer == nullptr) {
+ return false;
+ }
+ *index = mImpl.assignSlot(newBuffer);
+ *buffer = newBuffer;
+ return true;
+ }
+
+ bool releaseBuffer(
+ const sp<MediaCodecBuffer> &buffer, std::shared_ptr<C2Buffer> *c2buffer) override {
+ return mImpl.releaseSlot(buffer, c2buffer);
+ }
+
+ bool expireComponentBuffer(
+ const std::shared_ptr<C2Buffer> &c2buffer) override {
+ return mImpl.expireComponentBuffer(c2buffer);
+ }
+
+ void flush() override {
+ // Forget all outstanding slots; previously handed-out buffers are no
+ // longer tracked.
+ mImpl.flush();
+ }
+
+ std::unique_ptr<CCodecBufferChannel::InputBuffers> toArrayMode(
+ size_t size) final {
+ int32_t capacity = kLinearBufferSize;
+ (void)mFormat->findInt32(C2_NAME_STREAM_MAX_BUFFER_SIZE_SETTING, &capacity);
+ // Apply the same cap as requestNewBuffer() so a bogus (e.g. negative,
+ // which wraps via the size_t cast) or oversized stream setting cannot
+ // force a huge allocation for every slot of the array.
+ if ((size_t)capacity > kMaxLinearBufferSize) {
+ ALOGD("max buffer size setting %d capped to %zu", capacity, kMaxLinearBufferSize);
+ capacity = kMaxLinearBufferSize;
+ }
+
+ std::unique_ptr<InputBuffersArray> array(
+ new InputBuffersArray(mComponentName.c_str(), "1D-Input[N]"));
+ array->setPool(mPool);
+ array->setFormat(mFormat);
+ array->initialize(
+ mImpl,
+ size,
+ [this, capacity] () -> sp<Codec2Buffer> { return alloc(capacity); });
+ return std::move(array);
+ }
+
+ // Fetch one linear block from the pool and wrap it as a client buffer.
+ // Returns nullptr on fetch failure.
+ virtual sp<Codec2Buffer> alloc(size_t size) const {
+ C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
+ std::shared_ptr<C2LinearBlock> block;
+
+ c2_status_t err = mPool->fetchLinearBlock(size, usage, &block);
+ if (err != C2_OK) {
+ return nullptr;
+ }
+
+ return LinearBlockBuffer::Allocate(mFormat, block);
+ }
+
+private:
+ FlexBuffersImpl mImpl;
+};
+
+/**
+ * Linear input buffers for encrypted content. A fixed set of IMemory chunks
+ * is pre-allocated from |dealer|; each allocation pairs one idle chunk with
+ * a freshly fetched linear block.
+ */
+class EncryptedLinearInputBuffers : public LinearInputBuffers {
+public:
+ EncryptedLinearInputBuffers(
+ bool secure,
+ const sp<MemoryDealer> &dealer,
+ const sp<ICrypto> &crypto,
+ int32_t heapSeqNum,
+ size_t capacity,
+ const char *componentName, const char *name = "EncryptedInput")
+ : LinearInputBuffers(componentName, name),
+ mUsage({0, 0}),
+ mDealer(dealer),
+ mCrypto(crypto),
+ mHeapSeqNum(heapSeqNum) {
+ if (secure) {
+ mUsage = { C2MemoryUsage::READ_PROTECTED, 0 };
+ } else {
+ mUsage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
+ }
+ // Pre-allocate the memory chunks; tolerate partial allocation.
+ for (size_t i = 0; i < kMinInputBufferArraySize; ++i) {
+ sp<IMemory> memory = mDealer->allocate(capacity);
+ if (memory == nullptr) {
+ ALOGD("[%s] Failed to allocate memory from dealer: only %zu slots allocated", mName, i);
+ break;
+ }
+ mMemoryVector.push_back({std::weak_ptr<C2LinearBlock>(), memory});
+ }
+ }
+
+ ~EncryptedLinearInputBuffers() override {
+ }
+
+ sp<Codec2Buffer> alloc(size_t size) const override {
+ // Pick an IMemory chunk whose previously paired block has been freed.
+ sp<IMemory> memory;
+ size_t slot = mMemoryVector.size();
+ for (size_t i = 0; i < mMemoryVector.size(); ++i) {
+ if (mMemoryVector[i].block.expired()) {
+ memory = mMemoryVector[i].memory;
+ slot = i;
+ break;
+ }
+ }
+ if (memory == nullptr) {
+ return nullptr;
+ }
+
+ std::shared_ptr<C2LinearBlock> block;
+ c2_status_t err = mPool->fetchLinearBlock(size, mUsage, &block);
+ if (err != C2_OK) {
+ return nullptr;
+ }
+ // Record the pairing so the chosen slot stays busy until |block| is
+ // freed. (Previously the pairing was never recorded, so every entry
+ // always appeared idle and the first chunk was handed out for all
+ // concurrent allocations.)
+ mMemoryVector[slot].block = block;
+
+ return new EncryptedLinearBlockBuffer(mFormat, block, memory, mHeapSeqNum);
+ }
+
+private:
+ C2MemoryUsage mUsage;
+ sp<MemoryDealer> mDealer;
+ sp<ICrypto> mCrypto;
+ int32_t mHeapSeqNum;
+ struct Entry {
+ std::weak_ptr<C2LinearBlock> block; ///< block currently using |memory|
+ sp<IMemory> memory;
+ };
+ // mutable: alloc() is const (inherited signature) but must record which
+ // chunk was handed out.
+ mutable std::vector<Entry> mMemoryVector;
+};
+
+/**
+ * Input buffers that carry graphic metadata (GraphicMetadataBuffer) rather
+ * than actual pixel blocks. The allocator backing the metadata is fetched
+ * from the platform allocator store using the pool's allocator ID.
+ */
+class GraphicMetadataInputBuffers : public CCodecBufferChannel::InputBuffers {
+public:
+ GraphicMetadataInputBuffers(const char *componentName, const char *name = "2D-MetaInput")
+ : InputBuffers(componentName, name),
+ mImpl(mName),
+ mStore(GetCodec2PlatformAllocatorStore()) { }
+ ~GraphicMetadataInputBuffers() override = default;
+
+ bool requestNewBuffer(size_t *index, sp<MediaCodecBuffer> *buffer) override {
+ std::shared_ptr<C2Allocator> alloc;
+ c2_status_t err = mStore->fetchAllocator(mPool->getAllocatorId(), &alloc);
+ if (err != C2_OK) {
+ return false;
+ }
+ sp<GraphicMetadataBuffer> newBuffer = new GraphicMetadataBuffer(mFormat, alloc);
+ if (newBuffer == nullptr) {
+ return false;
+ }
+ *index = mImpl.assignSlot(newBuffer);
+ *buffer = newBuffer;
+ return true;
+ }
+
+ bool releaseBuffer(
+ const sp<MediaCodecBuffer> &buffer, std::shared_ptr<C2Buffer> *c2buffer) override {
+ return mImpl.releaseSlot(buffer, c2buffer);
+ }
+
+ bool expireComponentBuffer(
+ const std::shared_ptr<C2Buffer> &c2buffer) override {
+ return mImpl.expireComponentBuffer(c2buffer);
+ }
+
+ void flush() override {
+ // This is no-op by default unless we're in array mode where we need to keep
+ // track of the flushed work.
+ }
+
+ std::unique_ptr<CCodecBufferChannel::InputBuffers> toArrayMode(
+ size_t size) final {
+ std::shared_ptr<C2Allocator> alloc;
+ c2_status_t err = mStore->fetchAllocator(mPool->getAllocatorId(), &alloc);
+ if (err != C2_OK) {
+ return nullptr;
+ }
+ std::unique_ptr<InputBuffersArray> array(
+ new InputBuffersArray(mComponentName.c_str(), "2D-MetaInput[N]"));
+ array->setPool(mPool);
+ array->setFormat(mFormat);
+ array->initialize(
+ mImpl,
+ size,
+ // Capture the allocator by value so array-mode allocations do not
+ // depend on this object's lifetime.
+ [format = mFormat, alloc]() -> sp<Codec2Buffer> {
+ return new GraphicMetadataBuffer(format, alloc);
+ });
+ return std::move(array);
+ }
+
+private:
+ FlexBuffersImpl mImpl;
+ std::shared_ptr<C2AllocatorStore> mStore;
+};
+
+// Input buffers backed by 2D graphic blocks allocated from the block pool,
+// with a local byte-buffer pool used for client-visible copies.
+class GraphicInputBuffers : public CCodecBufferChannel::InputBuffers {
+public:
+    GraphicInputBuffers(const char *componentName, const char *name = "2D-BB-Input")
+        : InputBuffers(componentName, name),
+          mImpl(mName),
+          mLocalBufferPool(LocalBufferPool::Create(
+                  kMaxLinearBufferSize * kMinInputBufferArraySize)) { }
+    ~GraphicInputBuffers() override = default;
+
+    // Allocates a new YV12 graphic block from the pool and assigns it a slot.
+    bool requestNewBuffer(size_t *index, sp<MediaCodecBuffer> *buffer) override {
+        // TODO: proper max input size
+        // TODO: read usage from intf
+        C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
+        sp<GraphicBlockBuffer> newBuffer = AllocateGraphicBuffer(
+                mPool, mFormat, HAL_PIXEL_FORMAT_YV12, usage, mLocalBufferPool);
+        if (newBuffer == nullptr) {
+            return false;
+        }
+        *index = mImpl.assignSlot(newBuffer);
+        *buffer = newBuffer;
+        return true;
+    }
+
+    bool releaseBuffer(
+            const sp<MediaCodecBuffer> &buffer, std::shared_ptr<C2Buffer> *c2buffer) override {
+        return mImpl.releaseSlot(buffer, c2buffer);
+    }
+
+    bool expireComponentBuffer(
+            const std::shared_ptr<C2Buffer> &c2buffer) override {
+        return mImpl.expireComponentBuffer(c2buffer);
+    }
+    void flush() override {
+        // This is no-op by default unless we're in array mode where we need to keep
+        // track of the flushed work.
+    }
+
+    // Converts to fixed-size array mode; each slot is populated with a fresh
+    // YV12 graphic block allocated the same way as requestNewBuffer().
+    std::unique_ptr<CCodecBufferChannel::InputBuffers> toArrayMode(
+            size_t size) final {
+        std::unique_ptr<InputBuffersArray> array(
+                new InputBuffersArray(mComponentName.c_str(), "2D-BB-Input[N]"));
+        array->setPool(mPool);
+        array->setFormat(mFormat);
+        array->initialize(
+                mImpl,
+                size,
+                [pool = mPool, format = mFormat, lbp = mLocalBufferPool]() -> sp<Codec2Buffer> {
+                    C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
+                    return AllocateGraphicBuffer(
+                            pool, format, HAL_PIXEL_FORMAT_YV12, usage, lbp);
+                });
+        return std::move(array);
+    }
+
+private:
+    FlexBuffersImpl mImpl;                            // slot bookkeeping
+    std::shared_ptr<LocalBufferPool> mLocalBufferPool;  // backing for client copies
+};
+
+// Placeholder InputBuffers used when the channel does not vend input buffers
+// to the client (e.g. when input arrives through an input surface). Every
+// operation is a no-op and no buffer is ever provided.
+class DummyInputBuffers : public CCodecBufferChannel::InputBuffers {
+public:
+    DummyInputBuffers(const char *componentName, const char *name = "2D-Input")
+        : InputBuffers(componentName, name) { }
+
+    // Never hands out a buffer.
+    bool requestNewBuffer(size_t *, sp<MediaCodecBuffer> *) override { return false; }
+
+    // Nothing was ever vended, so nothing can be released.
+    bool releaseBuffer(
+            const sp<MediaCodecBuffer> &, std::shared_ptr<C2Buffer> *) override {
+        return false;
+    }
+
+    // No component-side buffers are tracked.
+    bool expireComponentBuffer(const std::shared_ptr<C2Buffer> &) override { return false; }
+
+    // Nothing to flush.
+    void flush() override { }
+
+    // Already behaves like an (empty) array; no conversion is possible.
+    std::unique_ptr<CCodecBufferChannel::InputBuffers> toArrayMode(size_t) final {
+        return nullptr;
+    }
+
+    bool isArrayMode() const final { return true; }
+
+    // The "array" is always empty.
+    void getArray(Vector<sp<MediaCodecBuffer>> *array) const final { array->clear(); }
+};
+
+// Fixed-size (array-mode) output buffers. Component output is copied into
+// pre-allocated client buffers grabbed from BuffersArrayImpl.
+class OutputBuffersArray : public CCodecBufferChannel::OutputBuffers {
+public:
+    OutputBuffersArray(const char *componentName, const char *name = "Output[N]")
+        : OutputBuffers(componentName, name) { }
+    ~OutputBuffersArray() override = default;
+
+    // Seeds the array from an existing flexible impl, growing to at least
+    // |minSize| entries using |allocate| for new client buffers.
+    void initialize(
+            const FlexBuffersImpl &impl,
+            size_t minSize,
+            std::function<sp<Codec2Buffer>()> allocate) {
+        mImpl.initialize(impl, minSize, allocate);
+    }
+
+    bool isArrayMode() const final { return true; }
+
+    // Already in array mode; conversion is not applicable.
+    std::unique_ptr<CCodecBufferChannel::OutputBuffers> toArrayMode(
+            size_t) final {
+        return nullptr;
+    }
+
+    // Copies |buffer| into a free client buffer that can hold it. Returns
+    // WOULD_BLOCK when no suitable buffer is free right now (including when
+    // the copy fails, presumably so the caller can realloc() and retry --
+    // verify against caller).
+    status_t registerBuffer(
+            const std::shared_ptr<C2Buffer> &buffer,
+            size_t *index,
+            sp<MediaCodecBuffer> *clientBuffer) final {
+        sp<Codec2Buffer> c2Buffer;
+        status_t err = mImpl.grabBuffer(
+                index,
+                &c2Buffer,
+                [buffer](const sp<Codec2Buffer> &clientBuffer) {
+                    return clientBuffer->canCopy(buffer);
+                });
+        if (err == WOULD_BLOCK) {
+            ALOGV("[%s] buffers temporarily not available", mName);
+            return err;
+        } else if (err != OK) {
+            ALOGD("[%s] grabBuffer failed: %d", mName, err);
+            return err;
+        }
+        c2Buffer->setFormat(mFormat);
+        if (!c2Buffer->copy(buffer)) {
+            ALOGD("[%s] copy buffer failed", mName);
+            return WOULD_BLOCK;
+        }
+        submit(c2Buffer);
+        *clientBuffer = c2Buffer;
+        ALOGV("[%s] grabbed buffer %zu", mName, *index);
+        return OK;
+    }
+
+    // Copies codec-specific data (CSD) into a free client buffer large enough
+    // to hold it.
+    status_t registerCsd(
+            const C2StreamCsdInfo::output *csd,
+            size_t *index,
+            sp<MediaCodecBuffer> *clientBuffer) final {
+        sp<Codec2Buffer> c2Buffer;
+        status_t err = mImpl.grabBuffer(
+                index,
+                &c2Buffer,
+                [csd](const sp<Codec2Buffer> &clientBuffer) {
+                    return clientBuffer->base() != nullptr
+                            && clientBuffer->capacity() >= csd->flexCount();
+                });
+        if (err != OK) {
+            return err;
+        }
+        memcpy(c2Buffer->base(), csd->m.value, csd->flexCount());
+        c2Buffer->setRange(0, csd->flexCount());
+        c2Buffer->setFormat(mFormat);
+        *clientBuffer = c2Buffer;
+        return OK;
+    }
+
+    bool releaseBuffer(
+            const sp<MediaCodecBuffer> &buffer, std::shared_ptr<C2Buffer> *c2buffer) override {
+        return mImpl.returnBuffer(buffer, c2buffer);
+    }
+
+    // Flushes all tracked buffers and resets skip/cut state.
+    void flush(const std::list<std::unique_ptr<C2Work>> &flushedWork) override {
+        (void)flushedWork;
+        mImpl.flush();
+        if (mSkipCutBuffer != nullptr) {
+            mSkipCutBuffer->clear();
+        }
+    }
+
+    void getArray(Vector<sp<MediaCodecBuffer>> *array) const final {
+        mImpl.getArray(array);
+    }
+
+    // Re-allocates all client buffers, sized to fit buffers like |c2buffer|.
+    // Only linear buffers are supported.
+    void realloc(const std::shared_ptr<C2Buffer> &c2buffer) {
+        std::function<sp<Codec2Buffer>()> alloc;
+        switch (c2buffer->data().type()) {
+            case C2BufferData::LINEAR: {
+                // NOTE(review): this initializer is dead -- both branches below
+                // overwrite |size|.
+                uint32_t size = kLinearBufferSize;
+                const C2ConstLinearBlock &block = c2buffer->data().linearBlocks().front();
+                // Double the incoming size, capped at kMaxLinearBufferSize.
+                if (block.size() < kMaxLinearBufferSize / 2) {
+                    size = block.size() * 2;
+                } else {
+                    size = kMaxLinearBufferSize;
+                }
+                alloc = [format = mFormat, size] {
+                    return new LocalLinearBuffer(format, new ABuffer(size));
+                };
+                break;
+            }
+
+            // TODO: add support
+            case C2BufferData::GRAPHIC: FALLTHROUGH_INTENDED;
+
+            case C2BufferData::INVALID: FALLTHROUGH_INTENDED;
+            case C2BufferData::LINEAR_CHUNKS: FALLTHROUGH_INTENDED;
+            case C2BufferData::GRAPHIC_CHUNKS: FALLTHROUGH_INTENDED;
+            default:
+                ALOGD("Unsupported type: %d", (int)c2buffer->data().type());
+                return;
+        }
+        mImpl.realloc(alloc);
+    }
+
+private:
+    BuffersArrayImpl mImpl;
+};
+
+// Flexible (non-array) output buffers: client buffers are created on demand
+// by wrapping component output. Subclasses define how a C2Buffer is wrapped
+// and how empty array-mode buffers are allocated.
+class FlexOutputBuffers : public CCodecBufferChannel::OutputBuffers {
+public:
+    FlexOutputBuffers(const char *componentName, const char *name = "Output[]")
+        : OutputBuffers(componentName, name),
+          mImpl(mName) { }
+
+    // Wraps |buffer| into a client buffer and assigns it a slot.
+    // NOTE(review): wrap() may return nullptr in some subclasses; setFormat()
+    // below would then dereference null -- verify callers guarantee wrappable
+    // buffers here.
+    status_t registerBuffer(
+            const std::shared_ptr<C2Buffer> &buffer,
+            size_t *index,
+            sp<MediaCodecBuffer> *clientBuffer) override {
+        sp<Codec2Buffer> newBuffer = wrap(buffer);
+        newBuffer->setFormat(mFormat);
+        *index = mImpl.assignSlot(newBuffer);
+        *clientBuffer = newBuffer;
+        ALOGV("[%s] registered buffer %zu", mName, *index);
+        return OK;
+    }
+
+    // Copies codec-specific data into a new local linear buffer.
+    status_t registerCsd(
+            const C2StreamCsdInfo::output *csd,
+            size_t *index,
+            sp<MediaCodecBuffer> *clientBuffer) final {
+        sp<Codec2Buffer> newBuffer = new LocalLinearBuffer(
+                mFormat, ABuffer::CreateAsCopy(csd->m.value, csd->flexCount()));
+        *index = mImpl.assignSlot(newBuffer);
+        *clientBuffer = newBuffer;
+        return OK;
+    }
+
+    bool releaseBuffer(
+            const sp<MediaCodecBuffer> &buffer, std::shared_ptr<C2Buffer> *c2buffer) override {
+        return mImpl.releaseSlot(buffer, c2buffer);
+    }
+
+    void flush(
+            const std::list<std::unique_ptr<C2Work>> &flushedWork) override {
+        (void) flushedWork;
+        // This is no-op by default unless we're in array mode where we need to keep
+        // track of the flushed work.
+    }
+
+    // Converts to array mode, carrying over the skip/cut buffer and using
+    // allocateArrayBuffer() to pre-populate empty slots.
+    std::unique_ptr<CCodecBufferChannel::OutputBuffers> toArrayMode(
+            size_t size) override {
+        std::unique_ptr<OutputBuffersArray> array(new OutputBuffersArray(mComponentName.c_str()));
+        array->setFormat(mFormat);
+        array->transferSkipCutBuffer(mSkipCutBuffer);
+        array->initialize(
+                mImpl,
+                size,
+                [this]() { return allocateArrayBuffer(); });
+        return std::move(array);
+    }
+
+    /**
+     * Return an appropriate Codec2Buffer object for the type of buffers.
+     *
+     * \param buffer C2Buffer object to wrap.
+     *
+     * \return appropriate Codec2Buffer object to wrap |buffer|.
+     */
+    virtual sp<Codec2Buffer> wrap(const std::shared_ptr<C2Buffer> &buffer) = 0;
+
+    /**
+     * Return an appropriate Codec2Buffer object for the type of buffers, to be
+     * used as an empty array buffer.
+     *
+     * \return appropriate Codec2Buffer object which can copy() from C2Buffers.
+     */
+    virtual sp<Codec2Buffer> allocateArrayBuffer() = 0;
+
+private:
+    FlexBuffersImpl mImpl;
+};
+
+// Output buffers for 1D (linear) component output; wraps the component's
+// linear block read-only for the client.
+class LinearOutputBuffers : public FlexOutputBuffers {
+public:
+    LinearOutputBuffers(const char *componentName, const char *name = "1D-Output")
+        : FlexOutputBuffers(componentName, name) { }
+
+    // Reset the skip/cut state before delegating to the base flush.
+    void flush(
+            const std::list<std::unique_ptr<C2Work>> &flushedWork) override {
+        if (mSkipCutBuffer != nullptr) {
+            mSkipCutBuffer->clear();
+        }
+        FlexOutputBuffers::flush(flushedWork);
+    }
+
+    // Wraps a linear C2Buffer for the client. A null |buffer| yields an empty
+    // placeholder; anything other than exactly one linear block is rejected.
+    sp<Codec2Buffer> wrap(const std::shared_ptr<C2Buffer> &buffer) override {
+        if (!buffer) {
+            ALOGV("[%s] using a dummy buffer", mName);
+            return new LocalLinearBuffer(mFormat, new ABuffer(0));
+        }
+        const C2BufferData &data = buffer->data();
+        if (data.type() != C2BufferData::LINEAR) {
+            // We expect linear output buffers from the component.
+            ALOGV("[%s] non-linear buffer %d", mName, data.type());
+            return nullptr;
+        }
+        if (data.linearBlocks().size() != 1u) {
+            // We expect one and only one linear block from the component.
+            ALOGV("[%s] no linear buffers", mName);
+            return nullptr;
+        }
+        sp<Codec2Buffer> wrapped = ConstLinearBlockBuffer::Allocate(mFormat, buffer);
+        submit(wrapped);
+        return wrapped;
+    }
+
+    // TODO: proper max output size
+    sp<Codec2Buffer> allocateArrayBuffer() override {
+        return new LocalLinearBuffer(mFormat, new ABuffer(kLinearBufferSize));
+    }
+};
+
+// Output buffers for 2D output rendered via a surface: the client only needs
+// a placeholder that keeps the underlying C2Buffer alive.
+class GraphicOutputBuffers : public FlexOutputBuffers {
+public:
+    GraphicOutputBuffers(const char *componentName, const char *name = "2D-Output")
+        : FlexOutputBuffers(componentName, name) { }
+
+    // Wrap without exposing pixel data.
+    sp<Codec2Buffer> wrap(const std::shared_ptr<C2Buffer> &buffer) override {
+        return new DummyContainerBuffer(mFormat, buffer);
+    }
+
+    // Array-mode slots start with no C2Buffer attached.
+    sp<Codec2Buffer> allocateArrayBuffer() override {
+        return new DummyContainerBuffer(mFormat);
+    }
+};
+
+// Output buffers for 2D output consumed as raw bytes by the client: graphic
+// blocks are converted into client-visible byte buffers drawn from a local
+// buffer pool.
+class RawGraphicOutputBuffers : public FlexOutputBuffers {
+public:
+    RawGraphicOutputBuffers(const char *componentName, const char *name = "2D-BB-Output")
+        : FlexOutputBuffers(componentName, name),
+          mLocalBufferPool(LocalBufferPool::Create(
+                  kMaxLinearBufferSize * kMinOutputBufferArraySize)) { }
+    ~RawGraphicOutputBuffers() override = default;
+
+    // Converts |buffer| into a client byte buffer; a null |buffer| becomes an
+    // empty (zero-range) placeholder.
+    sp<Codec2Buffer> wrap(const std::shared_ptr<C2Buffer> &buffer) override {
+        if (buffer) {
+            return ConstGraphicBlockBuffer::Allocate(
+                    mFormat,
+                    buffer,
+                    [lbp = mLocalBufferPool](size_t capacity) {
+                        return lbp->newBuffer(capacity);
+                    });
+        }
+        sp<Codec2Buffer> emptyBuffer = ConstGraphicBlockBuffer::AllocateEmpty(
+                mFormat,
+                [lbp = mLocalBufferPool](size_t capacity) {
+                    return lbp->newBuffer(capacity);
+                });
+        emptyBuffer->setRange(0, 0);
+        return emptyBuffer;
+    }
+
+    // Array-mode slots start empty and are filled by copy() later.
+    sp<Codec2Buffer> allocateArrayBuffer() override {
+        return ConstGraphicBlockBuffer::AllocateEmpty(
+                mFormat,
+                [lbp = mLocalBufferPool](size_t capacity) {
+                    return lbp->newBuffer(capacity);
+                });
+    }
+
+private:
+    std::shared_ptr<LocalBufferPool> mLocalBufferPool;  // backing for client copies
+};
+
+} // namespace
+
+// RAII guard for queueing operations. Increments the in-flight count while
+// the sync object is running; records not-running otherwise so the caller can
+// bail out. mGuardLock serializes against state transitions in QueueSync.
+CCodecBufferChannel::QueueGuard::QueueGuard(
+        CCodecBufferChannel::QueueSync &sync) : mSync(sync) {
+    Mutex::Autolock l(mSync.mGuardLock);
+    // At this point it's guaranteed that mSync is not under state transition,
+    // as we are holding its mutex.
+
+    Mutexed<CCodecBufferChannel::QueueSync::Counter>::Locked count(mSync.mCount);
+    if (count->value == -1) {
+        // -1 means "stopped"; do not admit this operation.
+        mRunning = false;
+    } else {
+        ++count->value;
+        mRunning = true;
+    }
+}
+
+// Decrements the in-flight count (if we were admitted) and wakes any
+// QueueSync::stop() call waiting for the count to reach zero.
+CCodecBufferChannel::QueueGuard::~QueueGuard() {
+    if (mRunning) {
+        // We are not holding mGuardLock at this point so that QueueSync::stop() can
+        // keep holding the lock until mCount reaches zero.
+        Mutexed<CCodecBufferChannel::QueueSync::Counter>::Locked count(mSync.mCount);
+        --count->value;
+        count->cond.broadcast();
+    }
+}
+
+// Transitions the sync object from stopped (-1) to running (0). No-op if
+// already running.
+void CCodecBufferChannel::QueueSync::start() {
+    Mutex::Autolock l(mGuardLock);
+    // If stopped, it goes to running state; otherwise no-op.
+    Mutexed<Counter>::Locked count(mCount);
+    if (count->value == -1) {
+        count->value = 0;
+    }
+}
+
+// Transitions to stopped (-1), blocking until all in-flight QueueGuards have
+// finished. While mGuardLock is held, no new guard can be admitted.
+void CCodecBufferChannel::QueueSync::stop() {
+    Mutex::Autolock l(mGuardLock);
+    Mutexed<Counter>::Locked count(mCount);
+    if (count->value == -1) {
+        // no-op
+        return;
+    }
+    // Holding mGuardLock here blocks creation of additional QueueGuard objects, so
+    // mCount can only decrement. In other words, threads that acquired the lock
+    // are allowed to finish execution but additional threads trying to acquire
+    // the lock at this point will block, and then get QueueGuard at STOPPED
+    // state.
+    while (count->value != 0) {
+        count.waitForCondition(count->cond);
+    }
+    count->value = -1;
+}
+
+// CCodecBufferChannel::PipelineCapacity
+
+// Starts with zero capacity in both the input and component stages; real
+// values are set via initialize().
+CCodecBufferChannel::PipelineCapacity::PipelineCapacity()
+      : input(0), component(0),
+        mName("<UNKNOWN COMPONENT>") {
+}
+
+// Sets the available slot counts for the input and component stages.
+// |newName| must outlive this object (stored as a raw pointer); |callerTag|
+// is only used for logging.
+void CCodecBufferChannel::PipelineCapacity::initialize(
+        int newInput,
+        int newComponent,
+        const char* newName,
+        const char* callerTag) {
+    input.store(newInput, std::memory_order_relaxed);
+    component.store(newComponent, std::memory_order_relaxed);
+    mName = newName;
+    ALOGV("[%s] %s -- PipelineCapacity::initialize(): "
+          "pipeline availability initialized ==> "
+          "input = %d, component = %d",
+            mName, callerTag ? callerTag : "*",
+            newInput, newComponent);
+}
+
+// Attempts to claim one slot from both stages atomically-ish: decrement both
+// counters, and if either was already exhausted, roll both back and report
+// failure. Relaxed ordering is used; counters are advisory, not a lock.
+bool CCodecBufferChannel::PipelineCapacity::allocate(const char* callerTag) {
+    int prevInput = input.fetch_sub(1, std::memory_order_relaxed);
+    int prevComponent = component.fetch_sub(1, std::memory_order_relaxed);
+    if (prevInput > 0 && prevComponent > 0) {
+        ALOGV("[%s] %s -- PipelineCapacity::allocate() returns true: "
+              "pipeline availability -1 all ==> "
+              "input = %d, component = %d",
+                mName, callerTag ? callerTag : "*",
+                prevInput - 1,
+                prevComponent - 1);
+        return true;
+    }
+    // At least one stage had no free slot; undo the decrements.
+    input.fetch_add(1, std::memory_order_relaxed);
+    component.fetch_add(1, std::memory_order_relaxed);
+    ALOGV("[%s] %s -- PipelineCapacity::allocate() returns false: "
+          "pipeline availability unchanged ==> "
+          "input = %d, component = %d",
+            mName, callerTag ? callerTag : "*",
+            prevInput,
+            prevComponent);
+    return false;
+}
+
+// Returns one slot to both stages (inverse of allocate()).
+void CCodecBufferChannel::PipelineCapacity::free(const char* callerTag) {
+    int prevInput = input.fetch_add(1, std::memory_order_relaxed);
+    int prevComponent = component.fetch_add(1, std::memory_order_relaxed);
+    ALOGV("[%s] %s -- PipelineCapacity::free(): "
+          "pipeline availability +1 all ==> "
+          "input = %d, component = %d",
+            mName, callerTag ? callerTag : "*",
+            prevInput + 1,
+            prevComponent + 1);
+}
+
+// Returns |numDiscardedInputBuffers| slots to the input stage only and
+// reports the new input availability.
+int CCodecBufferChannel::PipelineCapacity::freeInputSlots(
+        size_t numDiscardedInputBuffers,
+        const char* callerTag) {
+    int prevInput = input.fetch_add(numDiscardedInputBuffers,
+                                    std::memory_order_relaxed);
+    ALOGV("[%s] %s -- PipelineCapacity::freeInputSlots(%zu): "
+          "pipeline availability +%zu input ==> "
+          "input = %d, component = %d",
+            mName, callerTag ? callerTag : "*",
+            numDiscardedInputBuffers,
+            numDiscardedInputBuffers,
+            prevInput + static_cast<int>(numDiscardedInputBuffers),
+            component.load(std::memory_order_relaxed));
+    return prevInput + static_cast<int>(numDiscardedInputBuffers);
+}
+
+// Returns one slot to the component stage only and reports the new component
+// availability.
+int CCodecBufferChannel::PipelineCapacity::freeComponentSlot(
+        const char* callerTag) {
+    int prevComponent = component.fetch_add(1, std::memory_order_relaxed);
+    ALOGV("[%s] %s -- PipelineCapacity::freeComponentSlot(): "
+          "pipeline availability +1 component ==> "
+          "input = %d, component = %d",
+            mName, callerTag ? callerTag : "*",
+            input.load(std::memory_order_relaxed),
+            prevComponent + 1);
+    return prevComponent + 1;
+}
+
+// CCodecBufferChannel::ReorderStash
+
+// Starts in the cleared state: no stashed/pending entries, depth 0, key
+// ORDINAL.
+CCodecBufferChannel::ReorderStash::ReorderStash() {
+    clear();
+}
+
+// Drops all stashed and pending entries and resets depth/key to defaults.
+void CCodecBufferChannel::ReorderStash::clear() {
+    mPending.clear();
+    mStash.clear();
+    mDepth = 0;
+    mKey = C2Config::ORDINAL;
+}
+
+// Changes the reorder depth; everything currently stashed is flushed to
+// pending first so no entry is held under the old depth policy.
+void CCodecBufferChannel::ReorderStash::setDepth(uint32_t depth) {
+    mPending.splice(mPending.end(), mStash);
+    mDepth = depth;
+}
+// Changes the ordering key; flushes the stash to pending first since its
+// ordering was established under the old key.
+void CCodecBufferChannel::ReorderStash::setKey(C2Config::ordinal_key_t key) {
+    mPending.splice(mPending.end(), mStash);
+    mKey = key;
+}
+
+// Pops the oldest pending entry into |entry|. Returns false when nothing is
+// pending.
+bool CCodecBufferChannel::ReorderStash::pop(Entry *entry) {
+    if (mPending.empty()) {
+        return false;
+    }
+    // Copy out field by field, then drop the front element.
+    const Entry &oldest = mPending.front();
+    entry->buffer = oldest.buffer;
+    entry->timestamp = oldest.timestamp;
+    entry->flags = oldest.flags;
+    entry->ordinal = oldest.ordinal;
+    mPending.pop_front();
+    return true;
+}
+
+// Inserts an entry into the stash in order (per the current key), then spills
+// the oldest entries into the pending queue while the stash exceeds mDepth.
+void CCodecBufferChannel::ReorderStash::emplace(
+        const std::shared_ptr<C2Buffer> &buffer,
+        int64_t timestamp,
+        int32_t flags,
+        const C2WorkOrdinalStruct &ordinal) {
+    for (auto it = mStash.begin(); it != mStash.end(); ++it) {
+        if (less(ordinal, it->ordinal)) {
+            // NOTE(review): the early return below skips the depth-overflow
+            // drain when inserting mid-stash, so the stash can temporarily
+            // exceed mDepth -- verify this is intended.
+            mStash.emplace(it, buffer, timestamp, flags, ordinal);
+            return;
+        }
+    }
+    mStash.emplace_back(buffer, timestamp, flags, ordinal);
+    while (!mStash.empty() && mStash.size() > mDepth) {
+        mPending.push_back(mStash.front());
+        mStash.pop_front();
+    }
+}
+
+// Puts an already-popped entry back at the head of the pending queue so it
+// will be delivered first on the next pop().
+void CCodecBufferChannel::ReorderStash::defer(
+        const CCodecBufferChannel::ReorderStash::Entry &entry) {
+    mPending.push_front(entry);
+}
+
+// True if at least one entry is ready to be popped.
+bool CCodecBufferChannel::ReorderStash::hasPending() const {
+    return !mPending.empty();
+}
+
+// Comparator ordering works in the stash according to the configured reorder
+// key. Returns true if |o1| should be delivered before |o2|.
+bool CCodecBufferChannel::ReorderStash::less(
+        const C2WorkOrdinalStruct &o1, const C2WorkOrdinalStruct &o2) {
+    switch (mKey) {
+        case C2Config::ORDINAL: return o1.frameIndex < o2.frameIndex;
+        case C2Config::TIMESTAMP: return o1.timestamp < o2.timestamp;
+        case C2Config::CUSTOM: return o1.customOrdinal < o2.customOrdinal;
+        default:
+            // Fix: the fallback has always compared frame indices, but the log
+            // message claimed it defaulted to timestamp; correct the message
+            // so logs match actual behavior.
+            ALOGD("Unrecognized key; default to frame index");
+            return o1.frameIndex < o2.frameIndex;
+    }
+}
+
+// CCodecBufferChannel
+
+// Starts with placeholder (dummy) input buffers; real input buffers are set
+// up later once the component and formats are known.
+CCodecBufferChannel::CCodecBufferChannel(
+        const std::shared_ptr<CCodecCallback> &callback)
+    : mHeapSeqNum(-1),
+      mCCodecCallback(callback),
+      mFrameIndex(0u),
+      mFirstValidFrameIndex(0u),
+      mMetaMode(MODE_NONE),
+      mAvailablePipelineCapacity(),
+      mInputMetEos(false) {
+    Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
+    buffers->reset(new DummyInputBuffers(""));
+}
+
+// Unregisters the decrypt heap from the crypto plugin if one was registered
+// (mHeapSeqNum >= 0 implies a successful setHeap earlier).
+CCodecBufferChannel::~CCodecBufferChannel() {
+    if (mCrypto != nullptr && mDealer != nullptr && mHeapSeqNum >= 0) {
+        mCrypto->unsetHeap(mHeapSeqNum);
+    }
+}
+
+// Stores the component and derives a log-friendly name: component name plus a
+// small per-instance id (pointer value mod 997) to disambiguate instances.
+// mName points into mComponentName, so it stays valid as long as this object.
+void CCodecBufferChannel::setComponent(
+        const std::shared_ptr<Codec2Client::Component> &component) {
+    mComponent = component;
+    mComponentName = component->getName() + StringPrintf("#%d", int(uintptr_t(component.get()) % 997));
+    mName = mComponentName.c_str();
+}
+
+// Adopts |surface| as the input source and connects it to the component.
+// Returns the connection status.
+status_t CCodecBufferChannel::setInputSurface(
+        const std::shared_ptr<InputSurfaceWrapper> &surface) {
+    ALOGV("[%s] setInputSurface", mName);
+    mInputSurface = surface;
+    return mInputSurface->connect(mComponent);
+}
+
+// Signals EOS on the input surface; only meaningful when input comes from an
+// input surface, otherwise INVALID_OPERATION.
+status_t CCodecBufferChannel::signalEndOfInputStream() {
+    return (mInputSurface == nullptr)
+            ? INVALID_OPERATION
+            : mInputSurface->signalEndOfInputStream();
+}
+
+// Converts a client MediaCodecBuffer into a C2Work item and queues it to the
+// component. Handles EOS and CSD flags, pending parameter updates, and the
+// empty-buffer-with-EOS workaround. Buffers arriving after EOS are ignored.
+status_t CCodecBufferChannel::queueInputBufferInternal(const sp<MediaCodecBuffer> &buffer) {
+    int64_t timeUs;
+    CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
+
+    if (mInputMetEos) {
+        ALOGD("[%s] buffers after EOS ignored (%lld us)", mName, (long long)timeUs);
+        return OK;
+    }
+
+    int32_t flags = 0;
+    int32_t tmp = 0;
+    bool eos = false;
+    if (buffer->meta()->findInt32("eos", &tmp) && tmp) {
+        eos = true;
+        mInputMetEos = true;
+        ALOGV("[%s] input EOS", mName);
+    }
+    if (buffer->meta()->findInt32("csd", &tmp) && tmp) {
+        flags |= C2FrameData::FLAG_CODEC_CONFIG;
+    }
+    ALOGV("[%s] queueInputBuffer: buffer->size() = %zu", mName, buffer->size());
+    std::unique_ptr<C2Work> work(new C2Work);
+    work->input.ordinal.timestamp = timeUs;
+    work->input.ordinal.frameIndex = mFrameIndex++;
+    // WORKAROUND: until codecs support handling work after EOS and max output sizing, use timestamp
+    // manipulation to achieve image encoding via video codec, and to constrain encoded output.
+    // Keep client timestamp in customOrdinal
+    work->input.ordinal.customOrdinal = timeUs;
+    work->input.buffers.clear();
+
+    if (buffer->size() > 0u) {
+        // Hand the client buffer back to the channel; its C2Buffer becomes
+        // the work input.
+        Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
+        std::shared_ptr<C2Buffer> c2buffer;
+        if (!(*buffers)->releaseBuffer(buffer, &c2buffer)) {
+            return -ENOENT;
+        }
+        work->input.buffers.push_back(c2buffer);
+    } else {
+        // Empty buffer: no C2Buffer to queue, so the input slot is free again.
+        mAvailablePipelineCapacity.freeInputSlots(1, "queueInputBufferInternal");
+        if (eos) {
+            flags |= C2FrameData::FLAG_END_OF_STREAM;
+        }
+    }
+    work->input.flags = (C2FrameData::flags_t)flags;
+    // TODO: fill info's
+
+    // Any setParameters() calls made since the last queue ride along with
+    // this work item.
+    work->input.configUpdate = std::move(mParamsToBeSet);
+    work->worklets.clear();
+    work->worklets.emplace_back(new C2Worklet);
+
+    std::list<std::unique_ptr<C2Work>> items;
+    items.push_back(std::move(work));
+    c2_status_t err = mComponent->queue(&items);
+
+    if (err == C2_OK && eos && buffer->size() > 0u) {
+        // EOS arrived on a non-empty buffer: queue a separate empty EOS work
+        // item after it (see WORKAROUND above).
+        mCCodecCallback->onWorkQueued(false);
+        work.reset(new C2Work);
+        work->input.ordinal.timestamp = timeUs;
+        work->input.ordinal.frameIndex = mFrameIndex++;
+        // WORKAROUND: keep client timestamp in customOrdinal
+        work->input.ordinal.customOrdinal = timeUs;
+        work->input.buffers.clear();
+        work->input.flags = C2FrameData::FLAG_END_OF_STREAM;
+
+        items.clear();
+        items.push_back(std::move(work));
+        err = mComponent->queue(&items);
+    }
+    if (err == C2_OK) {
+        mCCodecCallback->onWorkQueued(eos);
+    }
+
+    feedInputBufferIfAvailableInternal();
+    return err;
+}
+
+// Stages parameter updates to be attached to the next queued work item.
+// Takes ownership of the entries in |params| (they are moved out).
+// Only allowed while the channel is running.
+status_t CCodecBufferChannel::setParameters(std::vector<std::unique_ptr<C2Param>> &params) {
+    QueueGuard guard(mSync);
+    if (!guard.isRunning()) {
+        ALOGD("[%s] setParameters is only supported in the running state.", mName);
+        return -ENOSYS;
+    }
+    mParamsToBeSet.insert(mParamsToBeSet.end(),
+                          std::make_move_iterator(params.begin()),
+                          std::make_move_iterator(params.end()));
+    params.clear();
+    return OK;
+}
+
+// Public entry point for queueing a clear (non-secure) input buffer; gated on
+// the channel being in the running state.
+status_t CCodecBufferChannel::queueInputBuffer(const sp<MediaCodecBuffer> &buffer) {
+    QueueGuard guard(mSync);
+    if (!guard.isRunning()) {
+        ALOGD("[%s] No more buffers should be queued at current state.", mName);
+        return -ENOSYS;
+    }
+    return queueInputBufferInternal(buffer);
+}
+
+// Decrypts (via ICrypto) or descrambles (via CAS descrambler) a secure input
+// buffer in place, then queues the result like a normal input buffer.
+// |secure| selects a native-handle destination (secure memory) vs shared
+// memory. On success the buffer range is adjusted past any codec-data offset.
+status_t CCodecBufferChannel::queueSecureInputBuffer(
+        const sp<MediaCodecBuffer> &buffer, bool secure, const uint8_t *key,
+        const uint8_t *iv, CryptoPlugin::Mode mode, CryptoPlugin::Pattern pattern,
+        const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
+        AString *errorDetailMsg) {
+    QueueGuard guard(mSync);
+    if (!guard.isRunning()) {
+        ALOGD("[%s] No more buffers should be queued at current state.", mName);
+        return -ENOSYS;
+    }
+
+    if (!hasCryptoOrDescrambler()) {
+        return -ENOSYS;
+    }
+    // Buffers vended on this path are always EncryptedLinearBlockBuffer
+    // (presumably guaranteed by the input buffer setup -- verify).
+    sp<EncryptedLinearBlockBuffer> encryptedBuffer((EncryptedLinearBlockBuffer *)buffer.get());
+
+    ssize_t result = -1;
+    ssize_t codecDataOffset = 0;
+    if (mCrypto != nullptr) {
+        // DRM path: decrypt through ICrypto.
+        ICrypto::DestinationBuffer destination;
+        if (secure) {
+            destination.mType = ICrypto::kDestinationTypeNativeHandle;
+            destination.mHandle = encryptedBuffer->handle();
+        } else {
+            destination.mType = ICrypto::kDestinationTypeSharedMemory;
+            destination.mSharedMemory = mDecryptDestination;
+        }
+        ICrypto::SourceBuffer source;
+        encryptedBuffer->fillSourceBuffer(&source);
+        result = mCrypto->decrypt(
+                key, iv, mode, pattern, source, buffer->offset(),
+                subSamples, numSubSamples, destination, errorDetailMsg);
+        if (result < 0) {
+            return result;
+        }
+        if (destination.mType == ICrypto::kDestinationTypeSharedMemory) {
+            // Decrypted bytes landed in the staging memory; copy them back
+            // into the buffer.
+            encryptedBuffer->copyDecryptedContent(mDecryptDestination, result);
+        }
+    } else {
+        // CAS path: descramble through the HIDL descrambler.
+        // Here we cast CryptoPlugin::SubSample to hardware::cas::native::V1_0::SubSample
+        // directly, the structure definitions should match as checked in DescramblerImpl.cpp.
+        hidl_vec<SubSample> hidlSubSamples;
+        hidlSubSamples.setToExternal((SubSample *)subSamples, numSubSamples, false /*own*/);
+
+        hardware::cas::native::V1_0::SharedBuffer srcBuffer;
+        encryptedBuffer->fillSourceBuffer(&srcBuffer);
+
+        DestinationBuffer dstBuffer;
+        if (secure) {
+            dstBuffer.type = BufferType::NATIVE_HANDLE;
+            dstBuffer.secureMemory = hidl_handle(encryptedBuffer->handle());
+        } else {
+            // Non-secure: descramble in place within the source memory.
+            dstBuffer.type = BufferType::SHARED_MEMORY;
+            dstBuffer.nonsecureMemory = srcBuffer;
+        }
+
+        CasStatus status = CasStatus::OK;
+        hidl_string detailedError;
+        ScramblingControl sctrl = ScramblingControl::UNSCRAMBLED;
+
+        if (key != nullptr) {
+            // |key| is repurposed on this path: byte 0 carries the scrambling
+            // control, bytes 2-3 a little-endian PES offset.
+            sctrl = (ScramblingControl)key[0];
+            // Adjust for the PES offset
+            codecDataOffset = key[2] | (key[3] << 8);
+        }
+
+        auto returnVoid = mDescrambler->descramble(
+                sctrl,
+                hidlSubSamples,
+                srcBuffer,
+                0,
+                dstBuffer,
+                0,
+                [&status, &result, &detailedError] (
+                        CasStatus _status, uint32_t _bytesWritten,
+                        const hidl_string& _detailedError) {
+                    status = _status;
+                    result = (ssize_t)_bytesWritten;
+                    detailedError = _detailedError;
+                });
+
+        if (!returnVoid.isOk() || status != CasStatus::OK || result < 0) {
+            ALOGI("[%s] descramble failed, trans=%s, status=%d, result=%zd",
+                    mName, returnVoid.description().c_str(), status, result);
+            return UNKNOWN_ERROR;
+        }
+
+        if (result < codecDataOffset) {
+            ALOGD("invalid codec data offset: %zd, result %zd", codecDataOffset, result);
+            return BAD_VALUE;
+        }
+
+        ALOGV("[%s] descramble succeeded, %zd bytes", mName, result);
+
+        if (dstBuffer.type == BufferType::SHARED_MEMORY) {
+            encryptedBuffer->copyDecryptedContentFromMemory(result);
+        }
+    }
+
+    // Skip past the PES/codec-data prefix before queueing.
+    buffer->setRange(codecDataOffset, result - codecDataOffset);
+    return queueInputBufferInternal(buffer);
+}
+
+// Public wrapper: reports new input buffers to the client only while the
+// channel is running.
+void CCodecBufferChannel::feedInputBufferIfAvailable() {
+    QueueGuard guard(mSync);
+    if (!guard.isRunning()) {
+        ALOGV("[%s] We're not running --- no input buffer reported", mName);
+        return;
+    }
+    feedInputBufferIfAvailableInternal();
+}
+
+// Reports input buffers to the client while pipeline capacity is available,
+// EOS has not been reached, and no reordered output is still pending. If the
+// buffer source cannot provide a buffer, the claimed capacity is returned.
+void CCodecBufferChannel::feedInputBufferIfAvailableInternal() {
+    while (!mInputMetEos &&
+           !mReorderStash.lock()->hasPending() &&
+           mAvailablePipelineCapacity.allocate("feedInputBufferIfAvailable")) {
+        sp<MediaCodecBuffer> inBuffer;
+        size_t index;
+        {
+            Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
+            if (!(*buffers)->requestNewBuffer(&index, &inBuffer)) {
+                ALOGV("[%s] no new buffer available", mName);
+                // Undo the capacity claim made above.
+                mAvailablePipelineCapacity.free("feedInputBufferIfAvailable");
+                break;
+            }
+        }
+        ALOGV("[%s] new input index = %zu [%p]", mName, index, inBuffer.get());
+        mCallback->onInputBufferAvailable(index, inBuffer);
+    }
+}
+
+// Releases the client's output buffer and queues its graphic block to the
+// output surface with rotation, scaling, dataspace and HDR static metadata
+// derived from the C2Buffer's info params.
+status_t CCodecBufferChannel::renderOutputBuffer(
+        const sp<MediaCodecBuffer> &buffer, int64_t timestampNs) {
+    std::shared_ptr<C2Buffer> c2Buffer;
+    {
+        Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
+        if (*buffers) {
+            (*buffers)->releaseBuffer(buffer, &c2Buffer);
+        }
+    }
+    if (!c2Buffer) {
+        return INVALID_OPERATION;
+    }
+    // Releasing an output buffer may unblock pending output delivery.
+    sendOutputBuffers();
+
+#if 0
+    const std::vector<std::shared_ptr<const C2Info>> infoParams = c2Buffer->info();
+    ALOGV("[%s] queuing gfx buffer with %zu infos", mName, infoParams.size());
+    for (const std::shared_ptr<const C2Info> &info : infoParams) {
+        AString res;
+        for (size_t ix = 0; ix + 3 < info->size(); ix += 4) {
+            if (ix) res.append(", ");
+            res.append(*((int32_t*)info.get() + (ix / 4)));
+        }
+        ALOGV("  [%s]", res.c_str());
+    }
+#endif
+    // Map the rotation info (flip + quarter turns) to HAL transform flags.
+    std::shared_ptr<const C2StreamRotationInfo::output> rotation =
+        std::static_pointer_cast<const C2StreamRotationInfo::output>(
+                c2Buffer->getInfo(C2StreamRotationInfo::output::PARAM_TYPE));
+    bool flip = rotation && (rotation->flip & 1);
+    uint32_t quarters = ((rotation ? rotation->value : 0) / 90) & 3;
+    uint32_t transform = 0;
+    switch (quarters) {
+        case 0: // no rotation
+            transform = flip ? HAL_TRANSFORM_FLIP_H : 0;
+            break;
+        case 1: // 90 degrees counter-clockwise
+            transform = flip ? (HAL_TRANSFORM_FLIP_V | HAL_TRANSFORM_ROT_90)
+                    : HAL_TRANSFORM_ROT_270;
+            break;
+        case 2: // 180 degrees
+            transform = flip ? HAL_TRANSFORM_FLIP_V : HAL_TRANSFORM_ROT_180;
+            break;
+        case 3: // 90 degrees clockwise
+            transform = flip ? (HAL_TRANSFORM_FLIP_H | HAL_TRANSFORM_ROT_90)
+                    : HAL_TRANSFORM_ROT_90;
+            break;
+    }
+
+    std::shared_ptr<const C2StreamSurfaceScalingInfo::output> surfaceScaling =
+        std::static_pointer_cast<const C2StreamSurfaceScalingInfo::output>(
+                c2Buffer->getInfo(C2StreamSurfaceScalingInfo::output::PARAM_TYPE));
+    uint32_t videoScalingMode = NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW;
+    if (surfaceScaling) {
+        videoScalingMode = surfaceScaling->value;
+    }
+
+    // Use dataspace from format as it has the default aspects already applied
+    android_dataspace_t dataSpace = HAL_DATASPACE_UNKNOWN; // this is 0
+    (void)buffer->format()->findInt32("android._dataspace", (int32_t *)&dataSpace);
+
+    // HDR static info
+    std::shared_ptr<const C2StreamHdrStaticInfo::output> hdrStaticInfo =
+        std::static_pointer_cast<const C2StreamHdrStaticInfo::output>(
+                c2Buffer->getInfo(C2StreamHdrStaticInfo::output::PARAM_TYPE));
+
+    {
+        // Rendering without a surface is not an error per se; just drop.
+        Mutexed<OutputSurface>::Locked output(mOutputSurface);
+        if (output->surface == nullptr) {
+            ALOGI("[%s] cannot render buffer without surface", mName);
+            return OK;
+        }
+    }
+
+    std::vector<C2ConstGraphicBlock> blocks = c2Buffer->data().graphicBlocks();
+    if (blocks.size() != 1u) {
+        ALOGD("[%s] expected 1 graphic block, but got %zu", mName, blocks.size());
+        return UNKNOWN_ERROR;
+    }
+    const C2ConstGraphicBlock &block = blocks.front();
+
+    // TODO: revisit this after C2Fence implementation.
+    android::IGraphicBufferProducer::QueueBufferInput qbi(
+            timestampNs,
+            false, // droppable
+            dataSpace,
+            Rect(blocks.front().crop().left,
+                 blocks.front().crop().top,
+                 blocks.front().crop().right(),
+                 blocks.front().crop().bottom()),
+            videoScalingMode,
+            transform,
+            Fence::NO_FENCE, 0);
+    if (hdrStaticInfo) {
+        // Translate C2 mastering/content-light metadata into the HWC structs.
+        struct android_smpte2086_metadata smpte2086_meta = {
+            .displayPrimaryRed = {
+                hdrStaticInfo->mastering.red.x, hdrStaticInfo->mastering.red.y
+            },
+            .displayPrimaryGreen = {
+                hdrStaticInfo->mastering.green.x, hdrStaticInfo->mastering.green.y
+            },
+            .displayPrimaryBlue = {
+                hdrStaticInfo->mastering.blue.x, hdrStaticInfo->mastering.blue.y
+            },
+            .whitePoint = {
+                hdrStaticInfo->mastering.white.x, hdrStaticInfo->mastering.white.y
+            },
+            .maxLuminance = hdrStaticInfo->mastering.maxLuminance,
+            .minLuminance = hdrStaticInfo->mastering.minLuminance,
+        };
+
+        struct android_cta861_3_metadata cta861_meta = {
+            .maxContentLightLevel = hdrStaticInfo->maxCll,
+            .maxFrameAverageLightLevel = hdrStaticInfo->maxFall,
+        };
+
+        HdrMetadata hdr;
+        hdr.validTypes = HdrMetadata::SMPTE2086 | HdrMetadata::CTA861_3;
+        hdr.smpte2086 = smpte2086_meta;
+        hdr.cta8613 = cta861_meta;
+        qbi.setHdrMetadata(hdr);
+    }
+    android::IGraphicBufferProducer::QueueBufferOutput qbo;
+    status_t result = mComponent->queueToOutputSurface(block, qbi, &qbo);
+    if (result != OK) {
+        ALOGI("[%s] queueBuffer failed: %d", mName, result);
+        return result;
+    }
+    ALOGV("[%s] queue buffer successful", mName);
+
+    int64_t mediaTimeUs = 0;
+    (void)buffer->meta()->findInt64("timeUs", &mediaTimeUs);
+    mCCodecCallback->onOutputFramesRendered(mediaTimeUs, timestampNs);
+
+    return OK;
+}
+
+// Returns a client buffer (input or output) without using it. On release,
+// input capacity is restored and buffer delivery in both directions is
+// retried. Unknown buffers are logged and ignored.
+status_t CCodecBufferChannel::discardBuffer(const sp<MediaCodecBuffer> &buffer) {
+    ALOGV("[%s] discardBuffer: %p", mName, buffer.get());
+    bool released = false;
+    {
+        Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
+        if (*buffers && (*buffers)->releaseBuffer(buffer, nullptr)) {
+            // Unlock before touching capacity, presumably to avoid holding the
+            // buffer lock across other channel state -- verify lock ordering.
+            buffers.unlock();
+            released = true;
+            mAvailablePipelineCapacity.freeInputSlots(1, "discardBuffer");
+        }
+    }
+    {
+        Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
+        if (*buffers && (*buffers)->releaseBuffer(buffer, nullptr)) {
+            buffers.unlock();
+            released = true;
+        }
+    }
+    if (released) {
+        feedInputBufferIfAvailable();
+        sendOutputBuffers();
+    } else {
+        ALOGD("[%s] MediaCodec discarded an unknown buffer", mName);
+    }
+    return OK;
+}
+
+// Exposes the input buffers to the client as a fixed array, converting the
+// buffer set into array mode first if it is not already in it.
+void CCodecBufferChannel::getInputBufferArray(Vector<sp<MediaCodecBuffer>> *array) {
+    array->clear();
+    Mutexed<std::unique_ptr<InputBuffers>>::Locked inputs(mInputBuffers);
+
+    // Array mode is a prerequisite for handing out a stable buffer array.
+    if (!(*inputs)->isArrayMode()) {
+        *inputs = (*inputs)->toArrayMode(kMinInputBufferArraySize);
+    }
+    (*inputs)->getArray(array);
+}
+
+// Exposes the output buffers to the client as a fixed array, converting the
+// buffer set into array mode first if it is not already in it.
+void CCodecBufferChannel::getOutputBufferArray(Vector<sp<MediaCodecBuffer>> *array) {
+    array->clear();
+    Mutexed<std::unique_ptr<OutputBuffers>>::Locked outputs(mOutputBuffers);
+
+    // Array mode is a prerequisite for handing out a stable buffer array.
+    if (!(*outputs)->isArrayMode()) {
+        *outputs = (*outputs)->toArrayMode(kMinOutputBufferArraySize);
+    }
+    (*outputs)->getArray(array);
+}
+
+/**
+ * Starts the buffer channel: queries stream formats and reorder settings from
+ * the component, creates input/output block pools (honoring component
+ * allocator preferences and the debug.stagefright.c2-poolmask property),
+ * instantiates the appropriate InputBuffers/OutputBuffers implementations,
+ * connects the output surface if any, and initializes pipeline capacity.
+ *
+ * Fixes vs. previous revision: removed a stray empty statement
+ * (`int32_t padding = 0;;`) and a duplicated blank line. No behavior change.
+ */
+status_t CCodecBufferChannel::start(
+        const sp<AMessage> &inputFormat, const sp<AMessage> &outputFormat) {
+    C2StreamBufferTypeSetting::input iStreamFormat(0u);
+    C2StreamBufferTypeSetting::output oStreamFormat(0u);
+    C2PortReorderBufferDepthTuning::output reorderDepth;
+    C2PortReorderKeySetting::output reorderKey;
+    c2_status_t err = mComponent->query(
+            {
+                &iStreamFormat,
+                &oStreamFormat,
+                &reorderDepth,
+                &reorderKey,
+            },
+            {},
+            C2_DONT_BLOCK,
+            nullptr);
+    if (err == C2_BAD_INDEX) {
+        // Reorder params are optional; the stream formats are mandatory.
+        if (!iStreamFormat || !oStreamFormat) {
+            return UNKNOWN_ERROR;
+        }
+    } else if (err != C2_OK) {
+        return UNKNOWN_ERROR;
+    }
+
+    {
+        // Reset the reorder stash with the component-provided depth/key,
+        // if present.
+        Mutexed<ReorderStash>::Locked reorder(mReorderStash);
+        reorder->clear();
+        if (reorderDepth) {
+            reorder->setDepth(reorderDepth.value);
+        }
+        if (reorderKey) {
+            reorder->setKey(reorderKey.value);
+        }
+    }
+    // TODO: get this from input format
+    bool secure = mComponent->getName().find(".secure") != std::string::npos;
+
+    std::shared_ptr<C2AllocatorStore> allocatorStore = GetCodec2PlatformAllocatorStore();
+    // Pool mask gates which allocators may be used; defaults to ION and
+    // BUFFERQUEUE, overridable via the debug property.
+    int poolMask = property_get_int32(
+            "debug.stagefright.c2-poolmask",
+            1 << C2PlatformAllocatorStore::ION |
+            1 << C2PlatformAllocatorStore::BUFFERQUEUE);
+
+    if (inputFormat != nullptr) {
+        bool graphic = (iStreamFormat.value == C2FormatVideo);
+        std::shared_ptr<C2BlockPool> pool;
+        {
+            Mutexed<BlockPools>::Locked pools(mBlockPools);
+
+            // set default allocator ID.
+            pools->inputAllocatorId = (graphic) ? C2PlatformAllocatorStore::GRALLOC
+                                                : C2PlatformAllocatorStore::ION;
+
+            // query C2PortAllocatorsTuning::input from component. If an allocator ID is obtained
+            // from component, create the input block pool with given ID. Otherwise, use default IDs.
+            std::vector<std::unique_ptr<C2Param>> params;
+            err = mComponent->query({ },
+                                    { C2PortAllocatorsTuning::input::PARAM_TYPE },
+                                    C2_DONT_BLOCK,
+                                    &params);
+            if ((err != C2_OK && err != C2_BAD_INDEX) || params.size() != 1) {
+                ALOGD("[%s] Query input allocators returned %zu params => %s (%u)",
+                        mName, params.size(), asString(err), err);
+            } else if (err == C2_OK && params.size() == 1) {
+                C2PortAllocatorsTuning::input *inputAllocators =
+                    C2PortAllocatorsTuning::input::From(params[0].get());
+                if (inputAllocators && inputAllocators->flexCount() > 0) {
+                    std::shared_ptr<C2Allocator> allocator;
+                    // verify allocator IDs and resolve default allocator
+                    allocatorStore->fetchAllocator(inputAllocators->m.values[0], &allocator);
+                    if (allocator) {
+                        pools->inputAllocatorId = allocator->getId();
+                    } else {
+                        ALOGD("[%s] component requested invalid input allocator ID %u",
+                                mName, inputAllocators->m.values[0]);
+                    }
+                }
+            }
+
+            // TODO: use C2Component wrapper to associate this pool with ourselves
+            if ((poolMask >> pools->inputAllocatorId) & 1) {
+                err = CreateCodec2BlockPool(pools->inputAllocatorId, nullptr, &pool);
+                ALOGD("[%s] Created input block pool with allocatorID %u => poolID %llu - %s (%d)",
+                        mName, pools->inputAllocatorId,
+                        (unsigned long long)(pool ? pool->getLocalId() : 111000111),
+                        asString(err), err);
+            } else {
+                err = C2_NOT_FOUND;
+            }
+            if (err != C2_OK) {
+                // Fall back to the basic (non-component) pool.
+                C2BlockPool::local_id_t inputPoolId =
+                    graphic ? C2BlockPool::BASIC_GRAPHIC : C2BlockPool::BASIC_LINEAR;
+                err = GetCodec2BlockPool(inputPoolId, nullptr, &pool);
+                ALOGD("[%s] Using basic input block pool with poolID %llu => got %llu - %s (%d)",
+                        mName, (unsigned long long)inputPoolId,
+                        (unsigned long long)(pool ? pool->getLocalId() : 111000111),
+                        asString(err), err);
+                if (err != C2_OK) {
+                    return NO_MEMORY;
+                }
+            }
+            pools->inputPool = pool;
+        }
+
+        // Select the InputBuffers implementation based on stream type and
+        // crypto/metadata configuration.
+        Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
+        if (graphic) {
+            if (mInputSurface) {
+                // Input comes from a surface; the client never sees buffers.
+                buffers->reset(new DummyInputBuffers(mName));
+            } else if (mMetaMode == MODE_ANW) {
+                buffers->reset(new GraphicMetadataInputBuffers(mName));
+            } else {
+                buffers->reset(new GraphicInputBuffers(mName));
+            }
+        } else {
+            if (hasCryptoOrDescrambler()) {
+                int32_t capacity = kLinearBufferSize;
+                (void)inputFormat->findInt32(KEY_MAX_INPUT_SIZE, &capacity);
+                if ((size_t)capacity > kMaxLinearBufferSize) {
+                    ALOGD("client requested %d, capped to %zu", capacity, kMaxLinearBufferSize);
+                    capacity = kMaxLinearBufferSize;
+                }
+                if (mDealer == nullptr) {
+                    mDealer = new MemoryDealer(
+                            align(capacity, MemoryDealer::getAllocationAlignment())
+                                * (kMinInputBufferArraySize + 1),
+                            "EncryptedLinearInputBuffers");
+                    mDecryptDestination = mDealer->allocate((size_t)capacity);
+                }
+                // NOTE(review): mHeapSeqNum is reset to -1 whenever this branch
+                // is not taken, even if a heap was registered previously —
+                // preserved as-is; confirm intent before changing.
+                if (mCrypto != nullptr && mHeapSeqNum < 0) {
+                    mHeapSeqNum = mCrypto->setHeap(mDealer->getMemoryHeap());
+                } else {
+                    mHeapSeqNum = -1;
+                }
+                buffers->reset(new EncryptedLinearInputBuffers(
+                        secure, mDealer, mCrypto, mHeapSeqNum, (size_t)capacity, mName));
+            } else {
+                buffers->reset(new LinearInputBuffers(mName));
+            }
+        }
+        (*buffers)->setFormat(inputFormat);
+
+        if (err == C2_OK) {
+            (*buffers)->setPool(pool);
+        } else {
+            // TODO: error
+        }
+    }
+
+    if (outputFormat != nullptr) {
+        sp<IGraphicBufferProducer> outputSurface;
+        uint32_t outputGeneration;
+        {
+            Mutexed<OutputSurface>::Locked output(mOutputSurface);
+            outputSurface = output->surface ?
+                    output->surface->getIGraphicBufferProducer() : nullptr;
+            outputGeneration = output->generation;
+        }
+
+        bool graphic = (oStreamFormat.value == C2FormatVideo);
+        C2BlockPool::local_id_t outputPoolId_;
+
+        {
+            Mutexed<BlockPools>::Locked pools(mBlockPools);
+
+            // set default allocator ID.
+            pools->outputAllocatorId = (graphic) ? C2PlatformAllocatorStore::GRALLOC
+                                                 : C2PlatformAllocatorStore::ION;
+
+            // query C2PortAllocatorsTuning::output from component, or use default allocator if
+            // unsuccessful.
+            std::vector<std::unique_ptr<C2Param>> params;
+            err = mComponent->query({ },
+                                    { C2PortAllocatorsTuning::output::PARAM_TYPE },
+                                    C2_DONT_BLOCK,
+                                    &params);
+            if ((err != C2_OK && err != C2_BAD_INDEX) || params.size() != 1) {
+                ALOGD("[%s] Query output allocators returned %zu params => %s (%u)",
+                        mName, params.size(), asString(err), err);
+            } else if (err == C2_OK && params.size() == 1) {
+                C2PortAllocatorsTuning::output *outputAllocators =
+                    C2PortAllocatorsTuning::output::From(params[0].get());
+                if (outputAllocators && outputAllocators->flexCount() > 0) {
+                    std::shared_ptr<C2Allocator> allocator;
+                    // verify allocator IDs and resolve default allocator
+                    allocatorStore->fetchAllocator(outputAllocators->m.values[0], &allocator);
+                    if (allocator) {
+                        pools->outputAllocatorId = allocator->getId();
+                    } else {
+                        ALOGD("[%s] component requested invalid output allocator ID %u",
+                                mName, outputAllocators->m.values[0]);
+                    }
+                }
+            }
+
+            // use bufferqueue if outputting to a surface.
+            // query C2PortSurfaceAllocatorTuning::output from component, or use default allocator
+            // if unsuccessful.
+            if (outputSurface) {
+                params.clear();
+                err = mComponent->query({ },
+                                        { C2PortSurfaceAllocatorTuning::output::PARAM_TYPE },
+                                        C2_DONT_BLOCK,
+                                        &params);
+                if ((err != C2_OK && err != C2_BAD_INDEX) || params.size() != 1) {
+                    ALOGD("[%s] Query output surface allocator returned %zu params => %s (%u)",
+                            mName, params.size(), asString(err), err);
+                } else if (err == C2_OK && params.size() == 1) {
+                    C2PortSurfaceAllocatorTuning::output *surfaceAllocator =
+                        C2PortSurfaceAllocatorTuning::output::From(params[0].get());
+                    if (surfaceAllocator) {
+                        std::shared_ptr<C2Allocator> allocator;
+                        // verify allocator IDs and resolve default allocator
+                        allocatorStore->fetchAllocator(surfaceAllocator->value, &allocator);
+                        if (allocator) {
+                            pools->outputAllocatorId = allocator->getId();
+                        } else {
+                            ALOGD("[%s] component requested invalid surface output allocator ID %u",
+                                    mName, surfaceAllocator->value);
+                            err = C2_BAD_VALUE;
+                        }
+                    }
+                }
+                if (pools->outputAllocatorId == C2PlatformAllocatorStore::GRALLOC
+                        && err != C2_OK
+                        && ((poolMask >> C2PlatformAllocatorStore::BUFFERQUEUE) & 1)) {
+                    pools->outputAllocatorId = C2PlatformAllocatorStore::BUFFERQUEUE;
+                }
+            }
+
+            if ((poolMask >> pools->outputAllocatorId) & 1) {
+                err = mComponent->createBlockPool(
+                        pools->outputAllocatorId, &pools->outputPoolId, &pools->outputPoolIntf);
+                ALOGI("[%s] Created output block pool with allocatorID %u => poolID %llu - %s",
+                        mName, pools->outputAllocatorId,
+                        (unsigned long long)pools->outputPoolId,
+                        asString(err));
+            } else {
+                err = C2_NOT_FOUND;
+            }
+            if (err != C2_OK) {
+                // use basic pool instead
+                pools->outputPoolId =
+                    graphic ? C2BlockPool::BASIC_GRAPHIC : C2BlockPool::BASIC_LINEAR;
+            }
+
+            // Configure output block pool ID as parameter C2PortBlockPoolsTuning::output to
+            // component.
+            std::unique_ptr<C2PortBlockPoolsTuning::output> poolIdsTuning =
+                    C2PortBlockPoolsTuning::output::AllocUnique({ pools->outputPoolId });
+
+            std::vector<std::unique_ptr<C2SettingResult>> failures;
+            err = mComponent->config({ poolIdsTuning.get() }, C2_MAY_BLOCK, &failures);
+            ALOGD("[%s] Configured output block pool ids %llu => %s",
+                    mName, (unsigned long long)poolIdsTuning->m.values[0], asString(err));
+            outputPoolId_ = pools->outputPoolId;
+        }
+
+        Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
+
+        if (graphic) {
+            if (outputSurface) {
+                buffers->reset(new GraphicOutputBuffers(mName));
+            } else {
+                buffers->reset(new RawGraphicOutputBuffers(mName));
+            }
+        } else {
+            buffers->reset(new LinearOutputBuffers(mName));
+        }
+        (*buffers)->setFormat(outputFormat->dup());
+
+        // Try to set output surface to created block pool if given.
+        if (outputSurface) {
+            mComponent->setOutputSurface(
+                    outputPoolId_,
+                    outputSurface,
+                    outputGeneration);
+        }
+
+        if (oStreamFormat.value == C2BufferData::LINEAR
+                && mComponentName.find("c2.qti.") == std::string::npos) {
+            // WORKAROUND: if we're using early CSD workaround we convert to
+            //             array mode, to appease apps assuming the output
+            //             buffers to be of the same size.
+            (*buffers) = (*buffers)->toArrayMode(kMinOutputBufferArraySize);
+
+            // Set up skip-cut buffer from encoder delay/padding if the audio
+            // parameters are known.
+            int32_t channelCount;
+            int32_t sampleRate;
+            if (outputFormat->findInt32(KEY_CHANNEL_COUNT, &channelCount)
+                    && outputFormat->findInt32(KEY_SAMPLE_RATE, &sampleRate)) {
+                int32_t delay = 0;
+                int32_t padding = 0;
+                if (!outputFormat->findInt32("encoder-delay", &delay)) {
+                    delay = 0;
+                }
+                if (!outputFormat->findInt32("encoder-padding", &padding)) {
+                    padding = 0;
+                }
+                if (delay || padding) {
+                    // We need write access to the buffers, and we're already in
+                    // array mode.
+                    (*buffers)->initSkipCutBuffer(delay, padding, sampleRate, channelCount);
+                }
+            }
+        }
+    }
+
+    // Set up pipeline control. This has to be done after mInputBuffers and
+    // mOutputBuffers are initialized to make sure that lingering callbacks
+    // about buffers from the previous generation do not interfere with the
+    // newly initialized pipeline capacity.
+
+    // Query delays
+    C2PortRequestedDelayTuning::input inputDelay;
+    C2PortRequestedDelayTuning::output outputDelay;
+    C2RequestedPipelineDelayTuning pipelineDelay;
+#if 0
+    err = mComponent->query(
+            { &inputDelay, &pipelineDelay, &outputDelay },
+            {},
+            C2_DONT_BLOCK,
+            nullptr);
+    mAvailablePipelineCapacity.initialize(
+            inputDelay,
+            inputDelay + pipelineDelay,
+            inputDelay + pipelineDelay + outputDelay,
+            mName);
+#else
+    mAvailablePipelineCapacity.initialize(
+            kMinInputBufferArraySize,
+            kMaxPipelineCapacity,
+            mName);
+#endif
+
+    mInputMetEos = false;
+    mSync.start();
+    return OK;
+}
+
+// Allocates the initial set of input buffers and hands them to the client,
+// except for buffers pre-filled with stashed codec-config data (after flush)
+// or the empty early-CSD workaround buffer, which are queued directly to the
+// component. No-op when input comes from a surface.
+status_t CCodecBufferChannel::requestInitialInputBuffers() {
+    if (mInputSurface) {
+        return OK;
+    }
+
+    C2StreamFormatConfig::output oStreamFormat(0u);
+    c2_status_t err = mComponent->query({ &oStreamFormat }, {}, C2_DONT_BLOCK, nullptr);
+    if (err != C2_OK) {
+        return UNKNOWN_ERROR;
+    }
+    std::vector<sp<MediaCodecBuffer>> toBeQueued;
+    // TODO: use proper buffer depth instead of this random value
+    for (size_t i = 0; i < kMinInputBufferArraySize; ++i) {
+        size_t index;
+        sp<MediaCodecBuffer> buffer;
+        {
+            Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
+            if (!(*buffers)->requestNewBuffer(&index, &buffer)) {
+                // Getting no buffer at all is fatal; a partial set is usable.
+                if (i == 0) {
+                    ALOGW("[%s] start: cannot allocate memory at all", mName);
+                    return NO_MEMORY;
+                } else {
+                    ALOGV("[%s] start: cannot allocate memory, only %zu buffers allocated",
+                            mName, i);
+                }
+                break;
+            }
+        }
+        if (buffer) {
+            Mutexed<std::list<sp<ABuffer>>>::Locked configs(mFlushedConfigs);
+            ALOGV("[%s] input buffer %zu available", mName, index);
+            bool post = true;
+            if (!configs->empty()) {
+                // Re-queue codec config data stashed at flush() time so the
+                // component regains its configuration.
+                sp<ABuffer> config = configs->front();
+                if (buffer->capacity() >= config->size()) {
+                    memcpy(buffer->base(), config->data(), config->size());
+                    buffer->setRange(0, config->size());
+                    buffer->meta()->clear();
+                    buffer->meta()->setInt64("timeUs", 0);
+                    buffer->meta()->setInt32("csd", 1);
+                    post = false;
+                } else {
+                    ALOGD("[%s] buffer capacity too small for the config (%zu < %zu)",
+                            mName, buffer->capacity(), config->size());
+                }
+            } else if (oStreamFormat.value == C2BufferData::LINEAR && i == 0
+                    && mComponentName.find("c2.qti.") == std::string::npos) {
+                // WORKAROUND: Some apps expect CSD available without queueing
+                //             any input. Queue an empty buffer to get the CSD.
+                buffer->setRange(0, 0);
+                buffer->meta()->clear();
+                buffer->meta()->setInt64("timeUs", 0);
+                post = false;
+            }
+            if (mAvailablePipelineCapacity.allocate("requestInitialInputBuffers")) {
+                if (post) {
+                    mCallback->onInputBufferAvailable(index, buffer);
+                } else {
+                    toBeQueued.emplace_back(buffer);
+                }
+            } else {
+                ALOGD("[%s] pipeline is full while requesting %zu-th input buffer",
+                        mName, i);
+            }
+        }
+    }
+    // Queue the config/CSD buffers outside the loop; release the component
+    // slot again if queueing fails.
+    for (const sp<MediaCodecBuffer> &buffer : toBeQueued) {
+        if (queueInputBufferInternal(buffer) != OK) {
+            mAvailablePipelineCapacity.freeComponentSlot("requestInitialInputBuffers");
+        }
+    }
+    return OK;
+}
+
+// Stops the channel: blocks further queueing, records the first frame index
+// of the next generation (so handleWork() can discard stale work), and tears
+// down the input surface if one is attached.
+void CCodecBufferChannel::stop() {
+    mSync.stop();
+    mFirstValidFrameIndex = mFrameIndex.load(std::memory_order_relaxed);
+    if (mInputSurface) {
+        mInputSurface->disconnect();
+        mInputSurface.reset();
+    }
+}
+
+// Flushes the channel. Codec-config (CSD) input work among |flushedWork| is
+// copied aside into mFlushedConfigs so it can be re-queued by
+// requestInitialInputBuffers() after the flush; then both buffer sets are
+// flushed.
+void CCodecBufferChannel::flush(const std::list<std::unique_ptr<C2Work>> &flushedWork) {
+    ALOGV("[%s] flush", mName);
+    {
+        Mutexed<std::list<sp<ABuffer>>>::Locked configs(mFlushedConfigs);
+        for (const std::unique_ptr<C2Work> &work : flushedWork) {
+            if (!(work->input.flags & C2FrameData::FLAG_CODEC_CONFIG)) {
+                continue;
+            }
+            // Only linear (byte-stream) config data can be stashed.
+            if (work->input.buffers.empty()
+                    || work->input.buffers.front()->data().linearBlocks().empty()) {
+                ALOGD("[%s] no linear codec config data found", mName);
+                continue;
+            }
+            C2ReadView view =
+                    work->input.buffers.front()->data().linearBlocks().front().map().get();
+            if (view.error() != C2_OK) {
+                ALOGD("[%s] failed to map flushed codec config data: %d", mName, view.error());
+                continue;
+            }
+            // Copy out; the original block goes away with the flushed work.
+            configs->push_back(ABuffer::CreateAsCopy(view.data(), view.capacity()));
+            ALOGV("[%s] stashed flushed codec config data (size=%u)", mName, view.capacity());
+        }
+    }
+    {
+        Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
+        (*buffers)->flush();
+    }
+    {
+        Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
+        (*buffers)->flush(flushedWork);
+    }
+}
+
+// Entry point for completed work from the component. Only replenishes the
+// input side when handleWork() reports the work was actually consumed
+// (it returns false for discarded/failed work).
+void CCodecBufferChannel::onWorkDone(
+        std::unique_ptr<C2Work> work, const sp<AMessage> &outputFormat,
+        const C2StreamInitDataInfo::output *initData,
+        size_t numDiscardedInputBuffers) {
+    bool consumed = handleWork(std::move(work), outputFormat, initData);
+    if (!consumed) {
+        return;
+    }
+    mAvailablePipelineCapacity.freeInputSlots(numDiscardedInputBuffers,
+                                              "onWorkDone");
+    feedInputBufferIfAvailable();
+}
+
+// Called when the component is done with an input buffer. Expires the
+// component-side reference; if that frees an input slot, tries to hand a new
+// input buffer to the client.
+void CCodecBufferChannel::onInputBufferDone(
+        const std::shared_ptr<C2Buffer>& buffer) {
+    bool slotFreed;
+    {
+        Mutexed<std::unique_ptr<InputBuffers>>::Locked inputs(mInputBuffers);
+        slotFreed = (*inputs)->expireComponentBuffer(buffer);
+        if (slotFreed) {
+            mAvailablePipelineCapacity.freeInputSlots(1, "onInputBufferDone");
+        }
+    }
+    // Feed outside the lock to avoid re-entering mInputBuffers.
+    if (slotFreed) {
+        feedInputBufferIfAvailable();
+    }
+}
+
+// Processes one completed work item: discards stale-generation work, applies
+// config updates (reorder depth/key), registers CSD and output buffers, and
+// stashes the output in the reorder stash before sending.
+// Returns true if the work was consumed (even if flushed), false if it was
+// discarded or an error was reported.
+bool CCodecBufferChannel::handleWork(
+        std::unique_ptr<C2Work> work,
+        const sp<AMessage> &outputFormat,
+        const C2StreamInitDataInfo::output *initData) {
+    // mFirstValidFrameIndex is advanced in stop(); anything older belongs to
+    // a previous generation.
+    if ((work->input.ordinal.frameIndex - mFirstValidFrameIndex.load()).peek() < 0) {
+        // Discard frames from previous generation.
+        ALOGD("[%s] Discard frames from previous generation.", mName);
+        return false;
+    }
+
+    // Free the component slot unless this is an incomplete worklet (more
+    // output for the same work item is still coming).
+    if (work->worklets.size() != 1u
+            || !work->worklets.front()
+            || !(work->worklets.front()->output.flags & C2FrameData::FLAG_INCOMPLETE)) {
+        mAvailablePipelineCapacity.freeComponentSlot("handleWork");
+    }
+
+    if (work->result == C2_NOT_FOUND) {
+        ALOGD("[%s] flushed work; ignored.", mName);
+        return true;
+    }
+
+    if (work->result != C2_OK) {
+        ALOGD("[%s] work failed to complete: %d", mName, work->result);
+        mCCodecCallback->onError(work->result, ACTION_CODE_FATAL);
+        return false;
+    }
+
+    // NOTE: MediaCodec usage supposedly have only one worklet
+    if (work->worklets.size() != 1u) {
+        ALOGI("[%s] onWorkDone: incorrect number of worklets: %zu",
+                mName, work->worklets.size());
+        mCCodecCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+        return false;
+    }
+
+    const std::unique_ptr<C2Worklet> &worklet = work->worklets.front();
+
+    std::shared_ptr<C2Buffer> buffer;
+    // NOTE: MediaCodec usage supposedly have only one output stream.
+    if (worklet->output.buffers.size() > 1u) {
+        ALOGI("[%s] onWorkDone: incorrect number of output buffers: %zu",
+                mName, worklet->output.buffers.size());
+        mCCodecCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+        return false;
+    } else if (worklet->output.buffers.size() == 1u) {
+        buffer = worklet->output.buffers[0];
+        if (!buffer) {
+            ALOGD("[%s] onWorkDone: nullptr found in buffers; ignored.", mName);
+        }
+    }
+
+    // Consume config updates from the worklet; currently only reorder
+    // depth/key changes are recognized.
+    while (!worklet->output.configUpdate.empty()) {
+        std::unique_ptr<C2Param> param;
+        worklet->output.configUpdate.back().swap(param);
+        worklet->output.configUpdate.pop_back();
+        switch (param->coreIndex().coreIndex()) {
+            case C2PortReorderBufferDepthTuning::CORE_INDEX: {
+                C2PortReorderBufferDepthTuning::output reorderDepth;
+                if (reorderDepth.updateFrom(*param)) {
+                    mReorderStash.lock()->setDepth(reorderDepth.value);
+                    ALOGV("[%s] onWorkDone: updated reorder depth to %u",
+                            mName, reorderDepth.value);
+                } else {
+                    ALOGD("[%s] onWorkDone: failed to read reorder depth", mName);
+                }
+                break;
+            }
+            case C2PortReorderKeySetting::CORE_INDEX: {
+                C2PortReorderKeySetting::output reorderKey;
+                if (reorderKey.updateFrom(*param)) {
+                    mReorderStash.lock()->setKey(reorderKey.value);
+                    ALOGV("[%s] onWorkDone: updated reorder key to %u",
+                            mName, reorderKey.value);
+                } else {
+                    ALOGD("[%s] onWorkDone: failed to read reorder key", mName);
+                }
+                break;
+            }
+            default:
+                ALOGV("[%s] onWorkDone: unrecognized config update (%08X)",
+                        mName, param->index());
+                break;
+        }
+    }
+
+    if (outputFormat != nullptr) {
+        Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
+        ALOGD("[%s] onWorkDone: output format changed to %s",
+                mName, outputFormat->debugString().c_str());
+        (*buffers)->setFormat(outputFormat);
+
+        // For raw audio, keep the skip-cut buffer in sync with the new
+        // channel count / sample rate.
+        AString mediaType;
+        if (outputFormat->findString(KEY_MIME, &mediaType)
+                && mediaType == MIMETYPE_AUDIO_RAW) {
+            int32_t channelCount;
+            int32_t sampleRate;
+            if (outputFormat->findInt32(KEY_CHANNEL_COUNT, &channelCount)
+                    && outputFormat->findInt32(KEY_SAMPLE_RATE, &sampleRate)) {
+                (*buffers)->updateSkipCutBuffer(sampleRate, channelCount);
+            }
+        }
+    }
+
+    int32_t flags = 0;
+    if (worklet->output.flags & C2FrameData::FLAG_END_OF_STREAM) {
+        flags |= MediaCodec::BUFFER_FLAG_EOS;
+        ALOGV("[%s] onWorkDone: output EOS", mName);
+    }
+
+    sp<MediaCodecBuffer> outBuffer;
+    size_t index;
+
+    // WORKAROUND: adjust output timestamp based on client input timestamp and codec
+    // input timestamp. Codec output timestamp (in the timestamp field) shall correspond to
+    // the codec input timestamp, but client output timestamp should (reported in timeUs)
+    // shall correspond to the client input timesamp (in customOrdinal). By using the
+    // delta between the two, this allows for some timestamp deviation - e.g. if one input
+    // produces multiple output.
+    c2_cntr64_t timestamp =
+        worklet->output.ordinal.timestamp + work->input.ordinal.customOrdinal
+                - work->input.ordinal.timestamp;
+    ALOGV("[%s] onWorkDone: input %lld, codec %lld => output %lld => %lld",
+            mName,
+            work->input.ordinal.customOrdinal.peekll(),
+            work->input.ordinal.timestamp.peekll(),
+            worklet->output.ordinal.timestamp.peekll(),
+            timestamp.peekll());
+
+    // CSD (codec-specific data) is delivered to the client immediately,
+    // bypassing the reorder stash.
+    if (initData != nullptr) {
+        Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
+        if ((*buffers)->registerCsd(initData, &index, &outBuffer) == OK) {
+            outBuffer->meta()->setInt64("timeUs", timestamp.peek());
+            outBuffer->meta()->setInt32("flags", MediaCodec::BUFFER_FLAG_CODECCONFIG);
+            ALOGV("[%s] onWorkDone: csd index = %zu [%p]", mName, index, outBuffer.get());
+
+            // Drop the lock around the client callback to avoid re-entrancy
+            // deadlocks.
+            buffers.unlock();
+            mCallback->onOutputBufferAvailable(index, outBuffer);
+            buffers.lock();
+        } else {
+            ALOGD("[%s] onWorkDone: unable to register csd", mName);
+            buffers.unlock();
+            mCCodecCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+            buffers.lock();
+            return false;
+        }
+    }
+
+    // Nothing to report: no buffer and no flags (e.g. a pure config-update
+    // work item).
+    if (!buffer && !flags) {
+        ALOGV("[%s] onWorkDone: Not reporting output buffer (%lld)",
+                mName, work->input.ordinal.frameIndex.peekull());
+        return true;
+    }
+
+    if (buffer) {
+        for (const std::shared_ptr<const C2Info> &info : buffer->info()) {
+            // TODO: properly translate these to metadata
+            switch (info->coreIndex().coreIndex()) {
+                case C2StreamPictureTypeMaskInfo::CORE_INDEX:
+                    // Key frames are surfaced to the client as sync frames.
+                    if (((C2StreamPictureTypeMaskInfo *)info.get())->value & C2PictureTypeKeyFrame) {
+                        flags |= MediaCodec::BUFFER_FLAG_SYNCFRAME;
+                    }
+                    break;
+                default:
+                    break;
+            }
+        }
+    }
+
+    {
+        Mutexed<ReorderStash>::Locked reorder(mReorderStash);
+        reorder->emplace(buffer, timestamp.peek(), flags, worklet->output.ordinal);
+        if (flags & MediaCodec::BUFFER_FLAG_EOS) {
+            // Flush reorder stash
+            reorder->setDepth(0);
+        }
+    }
+    sendOutputBuffers();
+    return true;
+}
+
+// Drains the reorder stash: pops entries in output order, registers each as a
+// client-visible output buffer and notifies the client. Stops (and defers the
+// entry back into the stash) when no client buffer slot is available.
+void CCodecBufferChannel::sendOutputBuffers() {
+    ReorderStash::Entry entry;
+    sp<MediaCodecBuffer> outBuffer;
+    size_t index;
+
+    while (true) {
+        {
+            Mutexed<ReorderStash>::Locked reorder(mReorderStash);
+            if (!reorder->hasPending()) {
+                break;
+            }
+            if (!reorder->pop(&entry)) {
+                break;
+            }
+        }
+        Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
+        status_t err = (*buffers)->registerBuffer(entry.buffer, &index, &outBuffer);
+        if (err != OK) {
+            if (err != WOULD_BLOCK) {
+                // NOTE(review): unchecked downcast — assumes the output
+                // buffers are in array mode whenever registerBuffer fails with
+                // a non-WOULD_BLOCK error; confirm against OutputBuffers
+                // implementations.
+                OutputBuffersArray *array = (OutputBuffersArray *)buffers->get();
+                array->realloc(entry.buffer);
+                mCCodecCallback->onOutputBuffersChanged();
+            }
+            buffers.unlock();
+            ALOGV("[%s] sendOutputBuffers: unable to register output buffer", mName);
+            // Put the entry back; it will be retried when a buffer is
+            // released (see discardBuffer()).
+            mReorderStash.lock()->defer(entry);
+            return;
+        }
+        // Unlock before the client callback to avoid re-entrancy deadlocks.
+        buffers.unlock();
+
+        outBuffer->meta()->setInt64("timeUs", entry.timestamp);
+        outBuffer->meta()->setInt32("flags", entry.flags);
+        ALOGV("[%s] sendOutputBuffers: out buffer index = %zu [%p] => %p + %zu",
+                mName, index, outBuffer.get(), outBuffer->data(), outBuffer->size());
+        mCallback->onOutputBufferAvailable(index, outBuffer);
+    }
+}
+
+// Switches the output surface. A new generation number is stamped on the
+// producer so buffers from the old surface can be told apart; setting a null
+// surface is rejected.
+status_t CCodecBufferChannel::setSurface(const sp<Surface> &newSurface) {
+    // Generation = (pid << 10) | 10-bit process-wide counter; the counter
+    // wraps, which is fine since only inequality with the previous
+    // generation matters.
+    static std::atomic_uint32_t surfaceGeneration{0};
+    uint32_t generation = (getpid() << 10) |
+            ((surfaceGeneration.fetch_add(1, std::memory_order_relaxed) + 1)
+                & ((1 << 10) - 1));
+
+    sp<IGraphicBufferProducer> producer;
+    if (newSurface) {
+        newSurface->setScalingMode(NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
+        newSurface->setMaxDequeuedBufferCount(kMinOutputBufferArraySize);
+        producer = newSurface->getIGraphicBufferProducer();
+        producer->setGenerationNumber(generation);
+    } else {
+        ALOGE("[%s] setting output surface to null", mName);
+        return INVALID_OPERATION;
+    }
+
+    std::shared_ptr<Codec2Client::Configurable> outputPoolIntf;
+    C2BlockPool::local_id_t outputPoolId;
+    {
+        Mutexed<BlockPools>::Locked pools(mBlockPools);
+        outputPoolId = pools->outputPoolId;
+        outputPoolIntf = pools->outputPoolIntf;
+    }
+
+    // Rebind the component's output block pool to the new surface, if the
+    // pool supports surfaces.
+    if (outputPoolIntf) {
+        if (mComponent->setOutputSurface(
+                outputPoolId,
+                producer,
+                generation) != C2_OK) {
+            ALOGI("[%s] setSurface: component setOutputSurface failed", mName);
+            return INVALID_OPERATION;
+        }
+    }
+
+    // Commit the new surface/generation only after the component accepted it.
+    {
+        Mutexed<OutputSurface>::Locked output(mOutputSurface);
+        output->surface = newSurface;
+        output->generation = generation;
+    }
+
+    return OK;
+}
+
+// Selects how graphic input buffers are interpreted (mMetaMode is consulted
+// in start() when choosing the InputBuffers implementation); takes effect at
+// the next start().
+void CCodecBufferChannel::setMetaMode(MetaMode mode) {
+    mMetaMode = mode;
+}
+
+// Translates a Codec2 status code into the framework status_t space.
+// C2_OK always maps to OK; operation-specific mappings are applied first,
+// then a generic table; any remaining code is passed through negated.
+status_t toStatusT(c2_status_t c2s, c2_operation_t c2op) {
+    // C2_OK is always translated to OK.
+    if (c2s == C2_OK) {
+        return OK;
+    }
+
+    // Operation-dependent translation
+    // TODO: Add as necessary
+    if (c2op == C2_OPERATION_Component_start) {
+        return (c2s == C2_NO_MEMORY) ? NO_MEMORY : UNKNOWN_ERROR;
+    }
+
+    // Backup operation-agnostic translation
+    switch (c2s) {
+        case C2_BAD_INDEX: return BAD_INDEX;
+        case C2_BAD_VALUE: return BAD_VALUE;
+        case C2_BLOCKING:  return WOULD_BLOCK;
+        case C2_DUPLICATE: return ALREADY_EXISTS;
+        case C2_NO_INIT:   return NO_INIT;
+        case C2_NO_MEMORY: return NO_MEMORY;
+        case C2_NOT_FOUND: return NAME_NOT_FOUND;
+        case C2_TIMED_OUT: return TIMED_OUT;
+        case C2_BAD_STATE:
+        case C2_CANCELED:
+        case C2_CANNOT_DO:
+        case C2_CORRUPTED:
+        case C2_OMITTED:
+        case C2_REFUSED:
+            return UNKNOWN_ERROR;
+        default:
+            // Unknown codes keep their identity, negated into status_t space.
+            return -static_cast<status_t>(c2s);
+    }
+}
+
+} // namespace android
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.h b/media/codec2/sfplugin/CCodecBufferChannel.h
new file mode 100644
index 0000000..431baaa
--- /dev/null
+++ b/media/codec2/sfplugin/CCodecBufferChannel.h
@@ -0,0 +1,417 @@
+/*
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CCODEC_BUFFER_CHANNEL_H_
+
+#define CCODEC_BUFFER_CHANNEL_H_
+
+#include <map>
+#include <memory>
+#include <vector>
+
+#include <C2Buffer.h>
+#include <C2Component.h>
+#include <Codec2Mapper.h>
+
+#include <codec2/hidl/client.h>
+#include <media/stagefright/bqhelper/GraphicBufferSource.h>
+#include <media/stagefright/codec2/1.0/InputSurface.h>
+#include <media/stagefright/foundation/Mutexed.h>
+#include <media/stagefright/CodecBase.h>
+#include <media/ICrypto.h>
+
+#include "InputSurfaceWrapper.h"
+
+namespace android {
+
+/**
+ * Callback interface through which CCodecBufferChannel notifies CCodec of
+ * events.
+ */
+class CCodecCallback {
+public:
+ virtual ~CCodecCallback() = default;
+ // Reports an error together with the action code for the client.
+ virtual void onError(status_t err, enum ActionCode actionCode) = 0;
+ // Reports that an output frame was rendered at the given time.
+ virtual void onOutputFramesRendered(int64_t mediaTimeUs, nsecs_t renderTimeNs) = 0;
+ // Reports that a work item was queued; |eos| signals end-of-stream.
+ virtual void onWorkQueued(bool eos) = 0;
+ // Reports that the set of output buffers has changed.
+ virtual void onOutputBuffersChanged() = 0;
+};
+
+/**
+ * BufferChannelBase implementation for CCodec.
+ */
+class CCodecBufferChannel
+ : public BufferChannelBase, public std::enable_shared_from_this<CCodecBufferChannel> {
+public:
+ explicit CCodecBufferChannel(const std::shared_ptr<CCodecCallback> &callback);
+ virtual ~CCodecBufferChannel();
+
+ // BufferChannelBase interface
+ virtual status_t queueInputBuffer(const sp<MediaCodecBuffer> &buffer) override;
+ virtual status_t queueSecureInputBuffer(
+ const sp<MediaCodecBuffer> &buffer,
+ bool secure,
+ const uint8_t *key,
+ const uint8_t *iv,
+ CryptoPlugin::Mode mode,
+ CryptoPlugin::Pattern pattern,
+ const CryptoPlugin::SubSample *subSamples,
+ size_t numSubSamples,
+ AString *errorDetailMsg) override;
+ virtual status_t renderOutputBuffer(
+ const sp<MediaCodecBuffer> &buffer, int64_t timestampNs) override;
+ virtual status_t discardBuffer(const sp<MediaCodecBuffer> &buffer) override;
+ virtual void getInputBufferArray(Vector<sp<MediaCodecBuffer>> *array) override;
+ virtual void getOutputBufferArray(Vector<sp<MediaCodecBuffer>> *array) override;
+
+ // Methods below are interface for CCodec to use.
+
+ /**
+ * Set the component object for buffer processing.
+ */
+ void setComponent(const std::shared_ptr<Codec2Client::Component> &component);
+
+ /**
+ * Set output graphic surface for rendering.
+ */
+ status_t setSurface(const sp<Surface> &surface);
+
+ /**
+ * Set GraphicBufferSource object from which the component extracts input
+ * buffers.
+ */
+ status_t setInputSurface(const std::shared_ptr<InputSurfaceWrapper> &surface);
+
+ /**
+ * Signal EOS to input surface.
+ */
+ status_t signalEndOfInputStream();
+
+ /**
+ * Set parameters.
+ */
+ status_t setParameters(std::vector<std::unique_ptr<C2Param>> &params);
+
+ /**
+ * Start queueing buffers to the component. This object should never queue
+ * buffers before this call has completed.
+ */
+ status_t start(const sp<AMessage> &inputFormat, const sp<AMessage> &outputFormat);
+
+ /**
+ * Request initial input buffers to be filled by client.
+ */
+ status_t requestInitialInputBuffers();
+
+ /**
+ * Stop queueing buffers to the component. This object should never queue
+ * buffers after this call, until start() is called.
+ */
+ void stop();
+
+ void flush(const std::list<std::unique_ptr<C2Work>> &flushedWork);
+
+ /**
+ * Notify input client about work done.
+ *
+ * @param workItems finished work item.
+ * @param outputFormat new output format if it has changed, otherwise nullptr
+ * @param initData new init data (CSD) if it has changed, otherwise nullptr
+ * @param numDiscardedInputBuffers the number of input buffers that are
+ * returned for the first time (not previously returned by
+ * onInputBufferDone()).
+ */
+ void onWorkDone(
+ std::unique_ptr<C2Work> work, const sp<AMessage> &outputFormat,
+ const C2StreamInitDataInfo::output *initData,
+ size_t numDiscardedInputBuffers);
+
+ /**
+ * Make an input buffer available for the client as it is no longer needed
+ * by the codec.
+ *
+ * @param buffer The buffer that becomes unused.
+ */
+ void onInputBufferDone(const std::shared_ptr<C2Buffer>& buffer);
+
+ // Buffer metadata handling mode; selected via setMetaMode().
+ enum MetaMode {
+ MODE_NONE,
+ MODE_ANW,
+ };
+
+ void setMetaMode(MetaMode mode);
+
+ // Internal classes
+ class Buffers;
+ class InputBuffers;
+ class OutputBuffers;
+
+private:
+ class QueueGuard;
+
+ /**
+ * Special mutex-like object with the following properties:
+ *
+ * - At STOPPED state (initial, or after stop())
+ * - QueueGuard object gets created at STOPPED state, and the client is
+ * supposed to return immediately.
+ * - At RUNNING state (after start())
+ * - Each QueueGuard object created in this state is counted; stop()
+ * waits until all of them are destroyed before transitioning back
+ * to STOPPED.
+ */
+ class QueueSync {
+ public:
+ /**
+ * At construction the sync object is in STOPPED state.
+ */
+ inline QueueSync() {}
+ ~QueueSync() = default;
+
+ /**
+ * Transition to RUNNING state when stopped. No-op if already in RUNNING
+ * state.
+ */
+ void start();
+
+ /**
+ * At RUNNING state, wait until all QueueGuard object created during
+ * RUNNING state are destroyed, and then transition to STOPPED state.
+ * No-op if already in STOPPED state.
+ */
+ void stop();
+
+ private:
+ Mutex mGuardLock;
+
+ // Guard counter; -1 denotes the STOPPED state.
+ struct Counter {
+ inline Counter() : value(-1) {}
+ int32_t value;
+ Condition cond;
+ };
+ Mutexed<Counter> mCount;
+
+ friend class CCodecBufferChannel::QueueGuard;
+ };
+
+ // RAII guard tied to a QueueSync; isRunning() reports whether the sync
+ // was in RUNNING state when the guard was constructed.
+ class QueueGuard {
+ public:
+ QueueGuard(QueueSync &sync);
+ ~QueueGuard();
+ inline bool isRunning() { return mRunning; }
+
+ private:
+ QueueSync &mSync;
+ bool mRunning;
+ };
+
+ void feedInputBufferIfAvailable();
+ void feedInputBufferIfAvailableInternal();
+ status_t queueInputBufferInternal(const sp<MediaCodecBuffer> &buffer);
+ bool handleWork(
+ std::unique_ptr<C2Work> work, const sp<AMessage> &outputFormat,
+ const C2StreamInitDataInfo::output *initData);
+ void sendOutputBuffers();
+
+ QueueSync mSync;
+ sp<MemoryDealer> mDealer;
+ sp<IMemory> mDecryptDestination;
+ // Heap sequence number for mDecryptDestination's memory heap — presumably
+ // registered with the crypto plugin; confirm in the .cpp.
+ int32_t mHeapSeqNum;
+
+ std::shared_ptr<Codec2Client::Component> mComponent;
+ std::string mComponentName; ///< component name for debugging
+ const char *mName; ///< C-string version of component name
+ std::shared_ptr<CCodecCallback> mCCodecCallback;
+ std::shared_ptr<C2BlockPool> mInputAllocator;
+ QueueSync mQueueSync;
+ std::vector<std::unique_ptr<C2Param>> mParamsToBeSet;
+
+ Mutexed<std::unique_ptr<InputBuffers>> mInputBuffers;
+ Mutexed<std::list<sp<ABuffer>>> mFlushedConfigs;
+ Mutexed<std::unique_ptr<OutputBuffers>> mOutputBuffers;
+
+ // NOTE(review): presumably the next frame index to assign to queued work
+ // and the first index considered valid after a flush — confirm in the .cpp.
+ std::atomic_uint64_t mFrameIndex;
+ std::atomic_uint64_t mFirstValidFrameIndex;
+
+ sp<MemoryDealer> makeMemoryDealer(size_t heapSize);
+
+ // Output surface and the bufferqueue generation it was attached with.
+ struct OutputSurface {
+ sp<Surface> surface;
+ uint32_t generation;
+ };
+ Mutexed<OutputSurface> mOutputSurface;
+
+ // Block pool ids/interfaces used for input and output buffers.
+ struct BlockPools {
+ C2Allocator::id_t inputAllocatorId;
+ std::shared_ptr<C2BlockPool> inputPool;
+ C2Allocator::id_t outputAllocatorId;
+ C2BlockPool::local_id_t outputPoolId;
+ std::shared_ptr<Codec2Client::Configurable> outputPoolIntf;
+ };
+ Mutexed<BlockPools> mBlockPools;
+
+ std::shared_ptr<InputSurfaceWrapper> mInputSurface;
+
+ MetaMode mMetaMode;
+
+ // PipelineCapacity is used in the input buffer gating logic.
+ //
+ // There are two criteria that need to be met before
+ // onInputBufferAvailable() is called:
+ // 1. The number of input buffers that have been received by
+ // CCodecBufferChannel but not returned via onWorkDone() or
+ // onInputBufferDone() does not exceed a certain limit. (Let us call this
+ // number the "input" capacity.)
+ // 2. The number of work items that have been received by
+ // CCodecBufferChannel whose outputs have not been returned from the
+ // component (by calling onWorkDone()) does not exceed a certain limit.
+ // (Let us call this the "component" capacity.)
+ //
+ // These two criteria guarantee that a new input buffer that arrives from
+ // the invocation of onInputBufferAvailable() will not
+ // 1. overload CCodecBufferChannel's input buffers; or
+ // 2. overload the component.
+ //
+ struct PipelineCapacity {
+ // The number of available input capacity.
+ std::atomic_int input;
+ // The number of available component capacity.
+ std::atomic_int component;
+
+ PipelineCapacity();
+ // Set the values of #input and #component.
+ void initialize(int newInput, int newComponent,
+ const char* newName = "<UNKNOWN COMPONENT>",
+ const char* callerTag = nullptr);
+
+ // Return true and decrease #input and #component by one if
+ // they are all greater than zero; return false otherwise.
+ //
+ // callerTag is used for logging only.
+ //
+ // allocate() is called by CCodecBufferChannel to check whether it can
+ // receive another input buffer. If the return value is true,
+ // onInputBufferAvailable() and onOutputBufferAvailable() can be called
+ // afterwards.
+ bool allocate(const char* callerTag = nullptr);
+
+ // Increase #input and #component by one.
+ //
+ // callerTag is used for logging only.
+ //
+ // free() is called by CCodecBufferChannel after allocate() returns true
+ // but onInputBufferAvailable() cannot be called for any reasons. It
+ // essentially undoes an allocate() call.
+ void free(const char* callerTag = nullptr);
+
+ // Increase #input by @p numDiscardedInputBuffers.
+ //
+ // callerTag is used for logging only.
+ //
+ // freeInputSlots() is called by CCodecBufferChannel when onWorkDone()
+ // or onInputBufferDone() is called. @p numDiscardedInputBuffers is
+ // provided in onWorkDone(), and is 1 in onInputBufferDone().
+ int freeInputSlots(size_t numDiscardedInputBuffers,
+ const char* callerTag = nullptr);
+
+ // Increase #component by one and return the updated value.
+ //
+ // callerTag is used for logging only.
+ //
+ // freeComponentSlot() is called by CCodecBufferChannel when
+ // onWorkDone() is called.
+ int freeComponentSlot(const char* callerTag = nullptr);
+
+ private:
+ // Component name. Used for logging.
+ const char* mName;
+ };
+ PipelineCapacity mAvailablePipelineCapacity;
+
+ // Holds output buffers back so they can be released in the order given by
+ // the configured ordinal key, up to a configured depth.
+ class ReorderStash {
+ public:
+ struct Entry {
+ inline Entry() : buffer(nullptr), timestamp(0), flags(0), ordinal({0, 0, 0}) {}
+ inline Entry(
+ const std::shared_ptr<C2Buffer> &b,
+ int64_t t,
+ int32_t f,
+ const C2WorkOrdinalStruct &o)
+ : buffer(b), timestamp(t), flags(f), ordinal(o) {}
+ std::shared_ptr<C2Buffer> buffer;
+ int64_t timestamp;
+ int32_t flags;
+ C2WorkOrdinalStruct ordinal;
+ };
+
+ ReorderStash();
+
+ void clear();
+ void setDepth(uint32_t depth);
+ void setKey(C2Config::ordinal_key_t key);
+ bool pop(Entry *entry);
+ void emplace(
+ const std::shared_ptr<C2Buffer> &buffer,
+ int64_t timestamp,
+ int32_t flags,
+ const C2WorkOrdinalStruct &ordinal);
+ void defer(const Entry &entry);
+ bool hasPending() const;
+
+ private:
+ std::list<Entry> mPending;
+ std::list<Entry> mStash;
+ uint32_t mDepth;
+ C2Config::ordinal_key_t mKey;
+
+ // Ordering predicate over work ordinals using the configured key.
+ bool less(const C2WorkOrdinalStruct &o1, const C2WorkOrdinalStruct &o2);
+ };
+ Mutexed<ReorderStash> mReorderStash;
+
+ // Presumably set once an EOS-flagged input buffer has been queued —
+ // confirm in the .cpp.
+ std::atomic_bool mInputMetEos;
+
+ inline bool hasCryptoOrDescrambler() {
+ return mCrypto != nullptr || mDescrambler != nullptr;
+ }
+};
+
+// Conversion of a c2_status_t value to a status_t value may depend on the
+// operation that returns the c2_status_t value.
+enum c2_operation_t {
+ C2_OPERATION_NONE,
+ C2_OPERATION_Component_connectToOmxInputSurface,
+ C2_OPERATION_Component_createBlockPool,
+ C2_OPERATION_Component_destroyBlockPool,
+ C2_OPERATION_Component_disconnectFromInputSurface,
+ C2_OPERATION_Component_drain,
+ C2_OPERATION_Component_flush,
+ C2_OPERATION_Component_queue,
+ C2_OPERATION_Component_release,
+ C2_OPERATION_Component_reset,
+ C2_OPERATION_Component_setOutputSurface,
+ C2_OPERATION_Component_start,
+ C2_OPERATION_Component_stop,
+ C2_OPERATION_ComponentStore_copyBuffer,
+ C2_OPERATION_ComponentStore_createComponent,
+ C2_OPERATION_ComponentStore_createInputSurface,
+ C2_OPERATION_ComponentStore_createInterface,
+ C2_OPERATION_Configurable_config,
+ C2_OPERATION_Configurable_query,
+ C2_OPERATION_Configurable_querySupportedParams,
+ C2_OPERATION_Configurable_querySupportedValues,
+ C2_OPERATION_InputSurface_connectToComponent,
+ C2_OPERATION_InputSurfaceConnection_disconnect,
+};
+
+// Translates |c2s| into a status_t; |c2op| optionally refines the mapping.
+status_t toStatusT(c2_status_t c2s, c2_operation_t c2op = C2_OPERATION_NONE);
+
+} // namespace android
+
+#endif // CCODEC_BUFFER_CHANNEL_H_
diff --git a/media/codec2/sfplugin/CCodecConfig.cpp b/media/codec2/sfplugin/CCodecConfig.cpp
new file mode 100644
index 0000000..8dbfd0e
--- /dev/null
+++ b/media/codec2/sfplugin/CCodecConfig.cpp
@@ -0,0 +1,1588 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "CCodecConfig"
+#include <cutils/properties.h>
+#include <log/log.h>
+
+#include <C2Component.h>
+#include <C2Debug.h>
+#include <C2Param.h>
+#include <util/C2InterfaceHelper.h>
+
+#include <media/stagefright/MediaCodecConstants.h>
+
+#include "CCodecConfig.h"
+#include "Codec2Mapper.h"
+
+#define DRC_DEFAULT_MOBILE_REF_LEVEL 64 /* 64*-0.25dB = -16 dB below full scale for mobile conf */
+#define DRC_DEFAULT_MOBILE_DRC_CUT 127 /* maximum compression of dynamic range for mobile conf */
+#define DRC_DEFAULT_MOBILE_DRC_BOOST 127 /* maximum compression of dynamic range for mobile conf */
+#define DRC_DEFAULT_MOBILE_DRC_HEAVY 1 /* switch for heavy compression for mobile conf */
+#define DRC_DEFAULT_MOBILE_DRC_EFFECT 3 /* MPEG-D DRC effect type; 3 => Limited playback range */
+#define DRC_DEFAULT_MOBILE_ENC_LEVEL (-1) /* encoder target level; -1 => the value is unknown, otherwise dB step value (e.g. 64 for -16 dB) */
+// names of properties that can be used to override the default DRC settings
+#define PROP_DRC_OVERRIDE_REF_LEVEL "aac_drc_reference_level"
+#define PROP_DRC_OVERRIDE_CUT "aac_drc_cut"
+#define PROP_DRC_OVERRIDE_BOOST "aac_drc_boost"
+#define PROP_DRC_OVERRIDE_HEAVY "aac_drc_heavy"
+#define PROP_DRC_OVERRIDE_ENC_LEVEL "aac_drc_enc_target_level"
+#define PROP_DRC_OVERRIDE_EFFECT "ro.aac_drc_effect_type"
+
+namespace android {
+
+// CCodecConfig
+
+namespace {
+
+/**
+ * mapping between SDK and Codec 2.0 configurations.
+ */
+struct ConfigMapper {
+ /**
+ * Value mapper (C2Value => C2Value)
+ */
+ typedef std::function<C2Value(C2Value)> Mapper;
+
+ /// shorthand
+ typedef CCodecConfig::Domain Domain;
+
+ // Maps an SDK media-format key to a Codec 2.0 struct/field pair; applies
+ // to all domains until restricted via limitTo().
+ ConfigMapper(std::string mediaKey, C2String c2struct, C2String c2field)
+ : mDomain(Domain::ALL), mMediaKey(mediaKey), mStruct(c2struct), mField(c2field) { }
+
+ /// Limits this parameter to the given domain
+ ConfigMapper &limitTo(uint32_t domain) {
+ C2_CHECK(domain & Domain::GUARD_BIT);
+ mDomain = Domain(mDomain & domain);
+ return *this;
+ }
+
+ /// Adds SDK => Codec 2.0 mapper (should not be in the SDK format)
+ ConfigMapper &withMapper(Mapper mapper) {
+ // mappers may only be set once
+ C2_CHECK(!mMapper);
+ C2_CHECK(!mReverse);
+ mMapper = mapper;
+ return *this;
+ }
+
+ /// Adds SDK <=> Codec 2.0 value mappers
+ ConfigMapper &withMappers(Mapper mapper, Mapper reverse) {
+ // mappers may only be set once
+ C2_CHECK(!mMapper);
+ C2_CHECK(!mReverse);
+ mMapper = mapper;
+ mReverse = reverse;
+ return *this;
+ }
+
+ /// Adds SDK <=> Codec 2.0 value mappers based on C2Mapper
+ template<typename C2Type, typename SdkType=int32_t>
+ ConfigMapper &withC2Mappers() {
+ C2_CHECK(!mMapper);
+ C2_CHECK(!mReverse);
+ mMapper = [](C2Value v) -> C2Value {
+ SdkType sdkValue;
+ C2Type c2Value;
+ if (v.get(&sdkValue) && C2Mapper::map(sdkValue, &c2Value)) {
+ return c2Value;
+ }
+ return C2Value();
+ };
+ mReverse = [](C2Value v) -> C2Value {
+ SdkType sdkValue;
+ C2Type c2Value;
+ // read the enum through its underlying type, then map back to SDK
+ using C2ValueType=typename _c2_reduce_enum_to_underlying_type<C2Type>::type;
+ if (v.get((C2ValueType*)&c2Value) && C2Mapper::map(c2Value, &sdkValue)) {
+ return sdkValue;
+ }
+ return C2Value();
+ };
+ return *this;
+ }
+
+ /// Maps from SDK values in an AMessage to a suitable C2Value.
+ C2Value mapFromMessage(const AMessage::ItemData &item) const {
+ C2Value value;
+ int32_t int32Value;
+ int64_t int64Value;
+ float floatValue;
+ double doubleValue;
+ if (item.find(&int32Value)) {
+ value = int32Value;
+ } else if (item.find(&int64Value)) {
+ value = int64Value;
+ } else if (item.find(&floatValue)) {
+ value = floatValue;
+ } else if (item.find(&doubleValue)) {
+ // C2Value has no double; narrow to float
+ value = (float)doubleValue;
+ }
+ if (value.type() != C2Value::NO_INIT && mMapper) {
+ value = mMapper(value);
+ }
+ return value;
+ }
+
+ /// Maps from a C2Value to an SDK value in an AMessage.
+ AMessage::ItemData mapToMessage(C2Value value) const {
+ AMessage::ItemData item;
+ int32_t int32Value;
+ uint32_t uint32Value;
+ int64_t int64Value;
+ uint64_t uint64Value;
+ float floatValue;
+ if (value.type() != C2Value::NO_INIT && mReverse) {
+ value = mReverse(value);
+ }
+ if (value.get(&int32Value)) {
+ item.set(int32Value);
+ } else if (value.get(&uint32Value) && uint32Value <= uint32_t(INT32_MAX)) {
+ // SDK does not support unsigned values
+ item.set((int32_t)uint32Value);
+ } else if (value.get(&int64Value)) {
+ item.set(int64Value);
+ } else if (value.get(&uint64Value) && uint64Value <= uint64_t(INT64_MAX)) {
+ // SDK does not support unsigned values
+ item.set((int64_t)uint64Value);
+ } else if (value.get(&floatValue)) {
+ item.set(floatValue);
+ }
+ return item;
+ }
+
+ Domain domain() const { return mDomain; }
+ std::string mediaKey() const { return mMediaKey; }
+ // Full dotted path of the C2 field, or just the struct name if no field.
+ std::string path() const { return mField.size() ? mStruct + '.' + mField : mStruct; }
+ Mapper mapper() const { return mMapper; }
+ Mapper reverse() const { return mReverse; }
+
+private:
+ Domain mDomain; ///< parameter domain (mask) containing port, kind and config domains
+ std::string mMediaKey; ///< SDK key
+ C2String mStruct; ///< Codec 2.0 struct name
+ C2String mField; ///< Codec 2.0 field name
+ Mapper mMapper; ///< optional SDK => Codec 2.0 value mapper
+ Mapper mReverse; ///< optional Codec 2.0 => SDK value mapper
+};
+
+/**
+ * Queries the component for its media type via the port-based (PORT) and
+ * stream-based (STREAM) media type parameters and returns the first one
+ * reported (the port parameter takes precedence). Returns an empty AString
+ * if the component reported neither.
+ */
+template <typename PORT, typename STREAM>
+AString QueryMediaTypeImpl(
+        const std::shared_ptr<Codec2Client::Component> &component) {
+    AString mediaType;
+    std::vector<std::unique_ptr<C2Param>> queried;
+    c2_status_t c2err = component->query(
+            {}, { PORT::PARAM_TYPE, STREAM::PARAM_TYPE }, C2_DONT_BLOCK, &queried);
+    // Guard on emptiness rather than (error && empty): previously a C2_OK
+    // result with an empty |queried| would have dereferenced queried[0] out
+    // of bounds.
+    if (queried.empty()) {
+        ALOGD("Query media type failed => %s", asString(c2err));
+    } else {
+        PORT *portMediaType =
+            PORT::From(queried[0].get());
+        if (portMediaType) {
+            mediaType = AString(
+                    portMediaType->m.value,
+                    strnlen(portMediaType->m.value, portMediaType->flexCount()));
+        } else {
+            // Fall back to the stream-based parameter.
+            STREAM *streamMediaType = STREAM::From(queried[0].get());
+            if (streamMediaType) {
+                mediaType = AString(
+                        streamMediaType->m.value,
+                        strnlen(streamMediaType->m.value, streamMediaType->flexCount()));
+            }
+        }
+        ALOGD("read media type: %s", mediaType.c_str());
+    }
+    return mediaType;
+}
+
+// Queries the media type of the component's input or output port, selecting
+// the appropriate parameter direction.
+AString QueryMediaType(
+        bool input, const std::shared_ptr<Codec2Client::Component> &component) {
+    using P = C2PortMediaTypeSetting;
+    using S = C2StreamMediaTypeSetting;
+    return input
+            ? QueryMediaTypeImpl<P::input, S::input>(component)
+            : QueryMediaTypeImpl<P::output, S::output>(component);
+}
+
+} // namespace
+
+/**
+ * Set of standard parameters used by CCodec that are exposed to MediaCodec.
+ */
+struct StandardParams {
+    typedef CCodecConfig::Domain Domain;
+
+    // standard (MediaCodec) params are keyed by media format key
+    typedef std::string SdkKey;
+
+    /// used to return reference to no config mappers in getConfigMappersForSdkKey
+    static const std::vector<ConfigMapper> NO_MAPPERS;
+
+    /// Returns Codec 2.0 equivalent parameters for an SDK format key.
+    const std::vector<ConfigMapper> &getConfigMappersForSdkKey(std::string key) const {
+        auto it = mConfigMappers.find(key);
+        if (it == mConfigMappers.end()) {
+            ALOGD("no c2 equivalents for %s", key.c_str());
+            return NO_MAPPERS;
+        }
+        ALOGV("found %zu eqs for %s", it->second.size(), key.c_str());
+        return it->second;
+    }
+
+    /**
+     * Adds a SDK <=> Codec 2.0 parameter mapping. Multiple Codec 2.0 parameters may map to a
+     * single SDK key, in which case they shall be ordered from least authoritative to most
+     * authoritative. When constructing SDK formats, the last mapped Codec 2.0 parameter that
+     * is supported by the component will determine the exposed value. (TODO: perhaps restrict this
+     * by domain.)
+     */
+    void add(const ConfigMapper &cm) {
+        auto it = mConfigMappers.find(cm.mediaKey());
+        ALOGV("%c%c%c%c %c%c%c %04x %9s %s => %s",
+              ((cm.domain() & Domain::IS_INPUT) ? 'I' : ' '),
+              ((cm.domain() & Domain::IS_OUTPUT) ? 'O' : ' '),
+              ((cm.domain() & Domain::IS_CODED) ? 'C' : ' '),
+              ((cm.domain() & Domain::IS_RAW) ? 'R' : ' '),
+              ((cm.domain() & Domain::IS_CONFIG) ? 'c' : ' '),
+              ((cm.domain() & Domain::IS_PARAM) ? 'p' : ' '),
+              ((cm.domain() & Domain::IS_READ) ? 'r' : ' '),
+              cm.domain(),
+              it == mConfigMappers.end() ? "adding" : "extending",
+              cm.mediaKey().c_str(), cm.path().c_str());
+        if (it == mConfigMappers.end()) {
+            std::vector<ConfigMapper> eqs = { cm };
+            mConfigMappers.emplace(cm.mediaKey(), eqs);
+        } else {
+            it->second.push_back(cm);
+        }
+    }
+
+    /**
+     * Returns all paths for a specific domain.
+     *
+     * \param any maximum domain mask. Returned parameters must match at least one of the domains
+     *            in the mask.
+     * \param all minimum domain mask. Returned parameters must match all of the domains in the
+     *            mask. This is restricted to the bits of the maximum mask.
+     */
+    std::vector<std::string> getPathsForDomain(
+            Domain any, Domain all = Domain::ALL) const {
+        std::vector<std::string> res;
+        // Iterate by const reference. map::value_type has a const key, so the
+        // previous pair<std::string, ...> element type silently copied every
+        // entry (key and mapper vector) on each iteration.
+        for (const auto &el : mConfigMappers) {
+            for (const ConfigMapper &cm : el.second) {
+                ALOGV("filtering %s %x %x %x %x", cm.path().c_str(), cm.domain(), any,
+                        (cm.domain() & any), (cm.domain() & any & all));
+                if ((cm.domain() & any) && ((cm.domain() & any & all) == (any & all))) {
+                    res.push_back(cm.path());
+                }
+            }
+        }
+        return res;
+    }
+
+    /**
+     * Returns SDK <=> Codec 2.0 mappings.
+     *
+     * Returned by const reference (previously a const value, which copied the
+     * entire map on every call).
+     *
+     * TODO: replace these with better methods as this exposes the inner structure.
+     */
+    const std::map<SdkKey, std::vector<ConfigMapper>> &getKeys() const {
+        return mConfigMappers;
+    }
+
+private:
+    std::map<SdkKey, std::vector<ConfigMapper>> mConfigMappers;
+};
+
+const std::vector<ConfigMapper> StandardParams::NO_MAPPERS;
+
+
+// Starts with empty input/output formats and no surface in use.
+CCodecConfig::CCodecConfig()
+ : mInputFormat(new AMessage),
+ mOutputFormat(new AMessage),
+ mUsingSurface(false) { }
+
+void CCodecConfig::initializeStandardParams() {
+ typedef Domain D;
+ mStandardParams = std::make_shared<StandardParams>();
+ std::function<void(const ConfigMapper &)> add =
+ [params = mStandardParams](const ConfigMapper &cm) {
+ params->add(cm);
+ };
+ std::function<void(const ConfigMapper &)> deprecated = add;
+
+ // allow int32 or float SDK values and represent them as float
+ ConfigMapper::Mapper makeFloat = [](C2Value v) -> C2Value {
+ // convert from i32 to float
+ int32_t i32Value;
+ float fpValue;
+ if (v.get(&i32Value)) {
+ return (float)i32Value;
+ } else if (v.get(&fpValue)) {
+ return fpValue;
+ }
+ return C2Value();
+ };
+
+ ConfigMapper::Mapper negate = [](C2Value v) -> C2Value {
+ int32_t value;
+ if (v.get(&value)) {
+ return -value;
+ }
+ return C2Value();
+ };
+
+ add(ConfigMapper(KEY_MIME, C2_PARAMKEY_INPUT_MEDIA_TYPE, "value")
+ .limitTo(D::INPUT & D::READ));
+ add(ConfigMapper(KEY_MIME, C2_PARAMKEY_OUTPUT_MEDIA_TYPE, "value")
+ .limitTo(D::OUTPUT & D::READ));
+
+ add(ConfigMapper(KEY_BIT_RATE, C2_PARAMKEY_BITRATE, "value")
+ .limitTo(D::ENCODER & D::OUTPUT));
+ // we also need to put the bitrate in the max bitrate field
+ add(ConfigMapper(KEY_MAX_BIT_RATE, C2_PARAMKEY_BITRATE, "value")
+ .limitTo(D::ENCODER & D::READ & D::OUTPUT));
+ add(ConfigMapper(PARAMETER_KEY_VIDEO_BITRATE, C2_PARAMKEY_BITRATE, "value")
+ .limitTo(D::ENCODER & D::VIDEO & D::PARAM));
+ add(ConfigMapper(KEY_BITRATE_MODE, C2_PARAMKEY_BITRATE_MODE, "value")
+ .limitTo(D::ENCODER & D::CODED)
+ .withMapper([](C2Value v) -> C2Value {
+ int32_t value;
+ C2Config::bitrate_mode_t mode;
+ if (v.get(&value) && C2Mapper::map(value, &mode)) {
+ return mode;
+ }
+ return C2Value();
+ }));
+ // remove when codecs switch to PARAMKEY and new modes
+ deprecated(ConfigMapper(KEY_BITRATE_MODE, "coded.bitrate-mode", "value")
+ .limitTo(D::ENCODER));
+ add(ConfigMapper(KEY_FRAME_RATE, C2_PARAMKEY_FRAME_RATE, "value")
+ .limitTo(D::VIDEO)
+ .withMappers(makeFloat, [](C2Value v) -> C2Value {
+ // read back always as int
+ float value;
+ if (v.get(&value)) {
+ return (int32_t)value;
+ }
+ return C2Value();
+ }));
+
+ add(ConfigMapper(KEY_MAX_INPUT_SIZE, C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE, "value")
+ .limitTo(D::INPUT));
+ // remove when codecs switch to PARAMKEY
+ deprecated(ConfigMapper(KEY_MAX_INPUT_SIZE, "coded.max-frame-size", "value")
+ .limitTo(D::INPUT));
+
+ // Rotation
+ // Note: SDK rotation is clock-wise, while C2 rotation is counter-clock-wise
+ add(ConfigMapper(KEY_ROTATION, C2_PARAMKEY_VUI_ROTATION, "value")
+ .limitTo(D::VIDEO & D::CODED)
+ .withMappers(negate, negate));
+ add(ConfigMapper(KEY_ROTATION, C2_PARAMKEY_ROTATION, "value")
+ .limitTo(D::VIDEO & D::RAW)
+ .withMappers(negate, negate));
+
+ // android 'video-scaling'
+ add(ConfigMapper("android._video-scaling", C2_PARAMKEY_SURFACE_SCALING_MODE, "value")
+ .limitTo(D::VIDEO & D::DECODER & D::RAW));
+
+ // Color Aspects
+ //
+ // configure default for decoders
+ add(ConfigMapper(KEY_COLOR_RANGE, C2_PARAMKEY_DEFAULT_COLOR_ASPECTS, "range")
+ .limitTo((D::VIDEO | D::IMAGE) & D::DECODER & D::CODED & (D::CONFIG | D::PARAM))
+ .withC2Mappers<C2Color::range_t>());
+ add(ConfigMapper(KEY_COLOR_TRANSFER, C2_PARAMKEY_DEFAULT_COLOR_ASPECTS, "transfer")
+ .limitTo((D::VIDEO | D::IMAGE) & D::DECODER & D::CODED & (D::CONFIG | D::PARAM))
+ .withC2Mappers<C2Color::transfer_t>());
+ add(ConfigMapper("color-primaries", C2_PARAMKEY_DEFAULT_COLOR_ASPECTS, "primaries")
+ .limitTo((D::VIDEO | D::IMAGE) & D::DECODER & D::CODED & (D::CONFIG | D::PARAM)));
+ add(ConfigMapper("color-matrix", C2_PARAMKEY_DEFAULT_COLOR_ASPECTS, "matrix")
+ .limitTo((D::VIDEO | D::IMAGE) & D::DECODER & D::CODED & (D::CONFIG | D::PARAM)));
+
+ // read back final for decoder output (also, configure final aspects as well. This should be
+ // overwritten based on coded/default values if component supports color aspects, but is used
+ // as final values if component does not support aspects at all)
+ add(ConfigMapper(KEY_COLOR_RANGE, C2_PARAMKEY_COLOR_ASPECTS, "range")
+ .limitTo((D::VIDEO | D::IMAGE) & D::DECODER & D::RAW)
+ .withC2Mappers<C2Color::range_t>());
+ add(ConfigMapper(KEY_COLOR_TRANSFER, C2_PARAMKEY_COLOR_ASPECTS, "transfer")
+ .limitTo((D::VIDEO | D::IMAGE) & D::DECODER & D::RAW)
+ .withC2Mappers<C2Color::transfer_t>());
+ add(ConfigMapper("color-primaries", C2_PARAMKEY_COLOR_ASPECTS, "primaries")
+ .limitTo((D::VIDEO | D::IMAGE) & D::DECODER & D::RAW));
+ add(ConfigMapper("color-matrix", C2_PARAMKEY_COLOR_ASPECTS, "matrix")
+ .limitTo((D::VIDEO | D::IMAGE) & D::DECODER & D::RAW));
+
+ // configure source aspects for encoders and read them back on the coded(!) port.
+ // This is to ensure muxing the desired aspects into the container.
+ add(ConfigMapper(KEY_COLOR_RANGE, C2_PARAMKEY_COLOR_ASPECTS, "range")
+ .limitTo((D::VIDEO | D::IMAGE) & D::ENCODER & D::CODED)
+ .withC2Mappers<C2Color::range_t>());
+ add(ConfigMapper(KEY_COLOR_TRANSFER, C2_PARAMKEY_COLOR_ASPECTS, "transfer")
+ .limitTo((D::VIDEO | D::IMAGE) & D::ENCODER & D::CODED)
+ .withC2Mappers<C2Color::transfer_t>());
+ add(ConfigMapper("color-primaries", C2_PARAMKEY_COLOR_ASPECTS, "primaries")
+ .limitTo((D::VIDEO | D::IMAGE) & D::ENCODER & D::CODED));
+ add(ConfigMapper("color-matrix", C2_PARAMKEY_COLOR_ASPECTS, "matrix")
+ .limitTo((D::VIDEO | D::IMAGE) & D::ENCODER & D::CODED));
+
+ // read back coded aspects for encoders (on the raw port), but also configure
+ // desired aspects here.
+ add(ConfigMapper(KEY_COLOR_RANGE, C2_PARAMKEY_VUI_COLOR_ASPECTS, "range")
+ .limitTo((D::VIDEO | D::IMAGE) & D::ENCODER & D::RAW)
+ .withC2Mappers<C2Color::range_t>());
+ add(ConfigMapper(KEY_COLOR_TRANSFER, C2_PARAMKEY_VUI_COLOR_ASPECTS, "transfer")
+ .limitTo((D::VIDEO | D::IMAGE) & D::ENCODER & D::RAW)
+ .withC2Mappers<C2Color::transfer_t>());
+ add(ConfigMapper("color-primaries", C2_PARAMKEY_VUI_COLOR_ASPECTS, "primaries")
+ .limitTo((D::VIDEO | D::IMAGE) & D::ENCODER & D::RAW));
+ add(ConfigMapper("color-matrix", C2_PARAMKEY_VUI_COLOR_ASPECTS, "matrix")
+ .limitTo((D::VIDEO | D::IMAGE) & D::ENCODER & D::RAW));
+
+ // Dataspace
+ add(ConfigMapper("android._dataspace", C2_PARAMKEY_DATA_SPACE, "value")
+ .limitTo((D::VIDEO | D::IMAGE) & D::RAW));
+
+ // HDR
+ add(ConfigMapper("smpte2086.red.x", C2_PARAMKEY_HDR_STATIC_INFO, "mastering.red.x")
+ .limitTo((D::VIDEO | D::IMAGE) & D::RAW));
+ add(ConfigMapper("smpte2086.red.y", C2_PARAMKEY_HDR_STATIC_INFO, "mastering.red.y")
+ .limitTo((D::VIDEO | D::IMAGE) & D::RAW));
+ add(ConfigMapper("smpte2086.green.x", C2_PARAMKEY_HDR_STATIC_INFO, "mastering.green.x")
+ .limitTo((D::VIDEO | D::IMAGE) & D::RAW));
+ add(ConfigMapper("smpte2086.green.y", C2_PARAMKEY_HDR_STATIC_INFO, "mastering.green.y")
+ .limitTo((D::VIDEO | D::IMAGE) & D::RAW));
+ add(ConfigMapper("smpte2086.blue.x", C2_PARAMKEY_HDR_STATIC_INFO, "mastering.blue.x")
+ .limitTo((D::VIDEO | D::IMAGE) & D::RAW));
+ add(ConfigMapper("smpte2086.blue.y", C2_PARAMKEY_HDR_STATIC_INFO, "mastering.blue.y")
+ .limitTo((D::VIDEO | D::IMAGE) & D::RAW));
+ add(ConfigMapper("smpte2086.white.x", C2_PARAMKEY_HDR_STATIC_INFO, "mastering.white.x")
+ .limitTo((D::VIDEO | D::IMAGE) & D::RAW));
+ add(ConfigMapper("smpte2086.white.y", C2_PARAMKEY_HDR_STATIC_INFO, "mastering.white.y")
+ .limitTo((D::VIDEO | D::IMAGE) & D::RAW));
+ add(ConfigMapper("smpte2086.max-luminance", C2_PARAMKEY_HDR_STATIC_INFO, "mastering.max-luminance")
+ .limitTo((D::VIDEO | D::IMAGE) & D::RAW));
+ add(ConfigMapper("smpte2086.min-luminance", C2_PARAMKEY_HDR_STATIC_INFO, "mastering.min-luminance")
+ .limitTo((D::VIDEO | D::IMAGE) & D::RAW));
+ add(ConfigMapper("cta861.max-cll", C2_PARAMKEY_HDR_STATIC_INFO, "max-cll")
+ .limitTo((D::VIDEO | D::IMAGE) & D::RAW));
+ add(ConfigMapper("cta861.max-fall", C2_PARAMKEY_HDR_STATIC_INFO, "max-fall")
+ .limitTo((D::VIDEO | D::IMAGE) & D::RAW));
+
+ add(ConfigMapper(std::string(KEY_FEATURE_) + FEATURE_SecurePlayback,
+ C2_PARAMKEY_SECURE_MODE, "value"));
+
+ add(ConfigMapper("prepend-sps-pps-to-idr-frames",
+ C2_PARAMKEY_PREPEND_HEADER_MODE, "value")
+ .limitTo(D::ENCODER & D::VIDEO)
+ .withMapper([](C2Value v) -> C2Value {
+ int32_t value;
+ if (v.get(&value) && value) {
+ return C2Value(C2Config::PREPEND_HEADER_TO_ALL_SYNC);
+ } else {
+ return C2Value(C2Config::PREPEND_HEADER_TO_NONE);
+ }
+ }));
+ // remove when codecs switch to PARAMKEY
+ deprecated(ConfigMapper("prepend-sps-pps-to-idr-frames",
+ "coding.add-csd-to-sync-frames", "value")
+ .limitTo(D::ENCODER & D::VIDEO));
+ // convert to timestamp base
+ add(ConfigMapper(KEY_I_FRAME_INTERVAL, C2_PARAMKEY_SYNC_FRAME_INTERVAL, "value")
+ .withMapper([](C2Value v) -> C2Value {
+ // convert from i32 to float
+ int32_t i32Value;
+ float fpValue;
+ if (v.get(&i32Value)) {
+ return int64_t(1000000) * i32Value;
+ } else if (v.get(&fpValue)) {
+ return int64_t(c2_min(1000000 * fpValue + 0.5, (double)INT64_MAX));
+ }
+ return C2Value();
+ }));
+ // remove when codecs switch to proper coding.gop (add support for calculating gop)
+ deprecated(ConfigMapper("i-frame-period", "coding.gop", "intra-period")
+ .limitTo(D::ENCODER & D::VIDEO));
+ add(ConfigMapper(KEY_INTRA_REFRESH_PERIOD, C2_PARAMKEY_INTRA_REFRESH, "period")
+ .limitTo(D::VIDEO & D::ENCODER)
+ .withMappers(makeFloat, [](C2Value v) -> C2Value {
+ // read back always as int
+ float value;
+ if (v.get(&value)) {
+ return (int32_t)value;
+ }
+ return C2Value();
+ }));
+ add(ConfigMapper(KEY_QUALITY, C2_PARAMKEY_QUALITY, "value"));
+ deprecated(ConfigMapper(PARAMETER_KEY_REQUEST_SYNC_FRAME,
+ "coding.request-sync", "value")
+ .limitTo(D::PARAM & D::ENCODER)
+ .withMapper([](C2Value) -> C2Value { return uint32_t(1); }));
+ add(ConfigMapper(PARAMETER_KEY_REQUEST_SYNC_FRAME,
+ C2_PARAMKEY_REQUEST_SYNC_FRAME, "value")
+ .limitTo(D::PARAM & D::ENCODER)
+ .withMapper([](C2Value) -> C2Value { return uint32_t(1); }));
+
+ add(ConfigMapper(KEY_OPERATING_RATE, C2_PARAMKEY_OPERATING_RATE, "value")
+ .limitTo(D::PARAM | D::CONFIG) // write-only
+ .withMapper(makeFloat));
+ // C2 priorities are inverted
+ add(ConfigMapper(KEY_PRIORITY, C2_PARAMKEY_PRIORITY, "value")
+ .withMappers(negate, negate));
+ // remove when codecs switch to PARAMKEY
+ deprecated(ConfigMapper(KEY_OPERATING_RATE, "ctrl.operating-rate", "value")
+ .withMapper(makeFloat));
+ deprecated(ConfigMapper(KEY_PRIORITY, "ctrl.priority", "value"));
+
+ add(ConfigMapper(KEY_WIDTH, C2_PARAMKEY_PICTURE_SIZE, "width")
+ .limitTo(D::VIDEO | D::IMAGE));
+ add(ConfigMapper(KEY_HEIGHT, C2_PARAMKEY_PICTURE_SIZE, "height")
+ .limitTo(D::VIDEO | D::IMAGE));
+
+ add(ConfigMapper("crop-left", C2_PARAMKEY_CROP_RECT, "left")
+ .limitTo(D::VIDEO | D::IMAGE));
+ add(ConfigMapper("crop-top", C2_PARAMKEY_CROP_RECT, "top")
+ .limitTo(D::VIDEO | D::IMAGE));
+ add(ConfigMapper("crop-width", C2_PARAMKEY_CROP_RECT, "width")
+ .limitTo(D::VIDEO | D::IMAGE));
+ add(ConfigMapper("crop-height", C2_PARAMKEY_CROP_RECT, "height")
+ .limitTo(D::VIDEO | D::IMAGE));
+
+ add(ConfigMapper(KEY_MAX_WIDTH, C2_PARAMKEY_MAX_PICTURE_SIZE, "width")
+ .limitTo((D::VIDEO | D::IMAGE) & D::RAW));
+ add(ConfigMapper(KEY_MAX_HEIGHT, C2_PARAMKEY_MAX_PICTURE_SIZE, "height")
+ .limitTo((D::VIDEO | D::IMAGE) & D::RAW));
+
+ add(ConfigMapper("csd-0", C2_PARAMKEY_INIT_DATA, "value")
+ .limitTo(D::OUTPUT & D::READ));
+
+ add(ConfigMapper(C2_PARAMKEY_TEMPORAL_LAYERING, C2_PARAMKEY_TEMPORAL_LAYERING, "")
+ .limitTo(D::ENCODER & D::VIDEO & D::OUTPUT));
+
+ // Pixel Format (use local key for actual pixel format as we don't distinguish between
+ // SDK layouts for flexible format and we need the actual SDK color format in the media format)
+ add(ConfigMapper("android._color-format", C2_PARAMKEY_PIXEL_FORMAT, "value")
+ .limitTo((D::VIDEO | D::IMAGE) & D::RAW)
+ .withMappers([](C2Value v) -> C2Value {
+ int32_t value;
+ if (v.get(&value)) {
+ switch (value) {
+ case COLOR_FormatSurface:
+ return (uint32_t)HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+ case COLOR_FormatYUV420Flexible:
+ return (uint32_t)HAL_PIXEL_FORMAT_YCBCR_420_888;
+ case COLOR_FormatYUV420Planar:
+ case COLOR_FormatYUV420SemiPlanar:
+ case COLOR_FormatYUV420PackedPlanar:
+ case COLOR_FormatYUV420PackedSemiPlanar:
+ return (uint32_t)HAL_PIXEL_FORMAT_YV12;
+ default:
+ // TODO: support some sort of passthrough
+ break;
+ }
+ }
+ return C2Value();
+ }, [](C2Value v) -> C2Value {
+ uint32_t value;
+ if (v.get(&value)) {
+ switch (value) {
+ case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
+ return COLOR_FormatSurface;
+ case HAL_PIXEL_FORMAT_YV12:
+ case HAL_PIXEL_FORMAT_YCBCR_420_888:
+ return COLOR_FormatYUV420Flexible;
+ default:
+ // TODO: support some sort of passthrough
+ break;
+ }
+ }
+ return C2Value();
+ }));
+
+ add(ConfigMapper(KEY_CHANNEL_COUNT, C2_PARAMKEY_CHANNEL_COUNT, "value")
+ .limitTo(D::AUDIO)); // read back to both formats
+ add(ConfigMapper(KEY_CHANNEL_COUNT, C2_PARAMKEY_CODED_CHANNEL_COUNT, "value")
+ .limitTo(D::AUDIO & D::CODED));
+
+ add(ConfigMapper(KEY_SAMPLE_RATE, C2_PARAMKEY_SAMPLE_RATE, "value")
+ .limitTo(D::AUDIO)); // read back to both port formats
+ add(ConfigMapper(KEY_SAMPLE_RATE, C2_PARAMKEY_CODED_SAMPLE_RATE, "value")
+ .limitTo(D::AUDIO & D::CODED));
+
+ add(ConfigMapper(KEY_PCM_ENCODING, C2_PARAMKEY_PCM_ENCODING, "value")
+ .limitTo(D::AUDIO));
+
+ add(ConfigMapper(KEY_IS_ADTS, C2_PARAMKEY_AAC_PACKAGING, "value")
+ .limitTo(D::AUDIO & D::CODED)
+ .withMappers([](C2Value v) -> C2Value {
+ int32_t value;
+ if (v.get(&value) && value) {
+ return C2Config::AAC_PACKAGING_ADTS;
+ }
+ return C2Value();
+ }, [](C2Value v) -> C2Value {
+ uint32_t value;
+ if (v.get(&value) && value == C2Config::AAC_PACKAGING_ADTS) {
+ return (int32_t)1;
+ }
+ return C2Value();
+ }));
+
+ std::shared_ptr<C2Mapper::ProfileLevelMapper> mapper =
+ C2Mapper::GetProfileLevelMapper(mCodingMediaType);
+
+ add(ConfigMapper(KEY_PROFILE, C2_PARAMKEY_PROFILE_LEVEL, "profile")
+ .limitTo(D::CODED)
+ .withMappers([mapper](C2Value v) -> C2Value {
+ C2Config::profile_t c2 = PROFILE_UNUSED;
+ int32_t sdk;
+ if (mapper && v.get(&sdk) && mapper->mapProfile(sdk, &c2)) {
+ return c2;
+ }
+ return PROFILE_UNUSED;
+ }, [mapper](C2Value v) -> C2Value {
+ C2Config::profile_t c2;
+ int32_t sdk;
+ using C2ValueType=typename _c2_reduce_enum_to_underlying_type<decltype(c2)>::type;
+ if (mapper && v.get((C2ValueType*)&c2) && mapper->mapProfile(c2, &sdk)) {
+ return sdk;
+ }
+ return C2Value();
+ }));
+
+ add(ConfigMapper(KEY_LEVEL, C2_PARAMKEY_PROFILE_LEVEL, "level")
+ .limitTo(D::CODED)
+ .withMappers([mapper](C2Value v) -> C2Value {
+ C2Config::level_t c2 = LEVEL_UNUSED;
+ int32_t sdk;
+ if (mapper && v.get(&sdk) && mapper->mapLevel(sdk, &c2)) {
+ return c2;
+ }
+ return LEVEL_UNUSED;
+ }, [mapper](C2Value v) -> C2Value {
+ C2Config::level_t c2;
+ int32_t sdk;
+ using C2ValueType=typename _c2_reduce_enum_to_underlying_type<decltype(c2)>::type;
+ if (mapper && v.get((C2ValueType*)&c2) && mapper->mapLevel(c2, &sdk)) {
+ return sdk;
+ }
+ return C2Value();
+ }));
+
+ // convert to dBFS and add default
+ add(ConfigMapper(KEY_AAC_DRC_TARGET_REFERENCE_LEVEL, C2_PARAMKEY_DRC_TARGET_REFERENCE_LEVEL, "value")
+ .limitTo(D::AUDIO & D::DECODER)
+ .withMapper([](C2Value v) -> C2Value {
+ int32_t value;
+ if (!v.get(&value) || value < 0) {
+ value = property_get_int32(PROP_DRC_OVERRIDE_REF_LEVEL, DRC_DEFAULT_MOBILE_REF_LEVEL);
+ }
+ return float(-0.25 * c2_min(value, 127));
+ }));
+
+ // convert to 0-1 (%) and add default
+ add(ConfigMapper(KEY_AAC_DRC_ATTENUATION_FACTOR, C2_PARAMKEY_DRC_ATTENUATION_FACTOR, "value")
+ .limitTo(D::AUDIO & D::DECODER)
+ .withMapper([](C2Value v) -> C2Value {
+ int32_t value;
+ if (!v.get(&value) || value < 0) {
+ value = property_get_int32(PROP_DRC_OVERRIDE_CUT, DRC_DEFAULT_MOBILE_DRC_CUT);
+ }
+ return float(c2_min(value, 127) / 127.);
+ }));
+
+ // convert to 0-1 (%) and add default
+ add(ConfigMapper(KEY_AAC_DRC_BOOST_FACTOR, C2_PARAMKEY_DRC_BOOST_FACTOR, "value")
+ .limitTo(D::AUDIO & D::DECODER)
+ .withMapper([](C2Value v) -> C2Value {
+ int32_t value;
+ if (!v.get(&value) || value < 0) {
+ value = property_get_int32(PROP_DRC_OVERRIDE_BOOST, DRC_DEFAULT_MOBILE_DRC_BOOST);
+ }
+ return float(c2_min(value, 127) / 127.);
+ }));
+
+ // convert to compression type and add default
+ add(ConfigMapper(KEY_AAC_DRC_HEAVY_COMPRESSION, C2_PARAMKEY_DRC_COMPRESSION_MODE, "value")
+ .limitTo(D::AUDIO & D::DECODER)
+ .withMapper([](C2Value v) -> C2Value {
+ int32_t value;
+ if (!v.get(&value) || value < 0) {
+ value = property_get_int32(PROP_DRC_OVERRIDE_HEAVY, DRC_DEFAULT_MOBILE_DRC_HEAVY);
+ }
+ return value == 1 ? C2Config::DRC_COMPRESSION_HEAVY : C2Config::DRC_COMPRESSION_LIGHT;
+ }));
+
+ // convert to dBFS and add default
+ add(ConfigMapper(KEY_AAC_ENCODED_TARGET_LEVEL, C2_PARAMKEY_DRC_ENCODED_TARGET_LEVEL, "value")
+ .limitTo(D::AUDIO & D::DECODER)
+ .withMapper([](C2Value v) -> C2Value {
+ int32_t value;
+ if (!v.get(&value) || value < 0) {
+ value = property_get_int32(PROP_DRC_OVERRIDE_ENC_LEVEL, DRC_DEFAULT_MOBILE_ENC_LEVEL);
+ }
+ return float(-0.25 * c2_min(value, 127));
+ }));
+
+ // convert to effect type (these map to SDK values) and add default
+ add(ConfigMapper(KEY_AAC_DRC_EFFECT_TYPE, C2_PARAMKEY_DRC_EFFECT_TYPE, "value")
+ .limitTo(D::AUDIO & D::DECODER)
+ .withMapper([](C2Value v) -> C2Value {
+ int32_t value;
+ if (!v.get(&value) || value < -1 || value > 8) {
+ value = property_get_int32(PROP_DRC_OVERRIDE_EFFECT, DRC_DEFAULT_MOBILE_DRC_EFFECT);
+ // ensure value is within range
+ if (value < -1 || value > 8) {
+ value = DRC_DEFAULT_MOBILE_DRC_EFFECT;
+ }
+ }
+ return value;
+ }));
+
+ add(ConfigMapper(KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT, C2_PARAMKEY_MAX_CHANNEL_COUNT, "value")
+ .limitTo(D::AUDIO));
+
+ add(ConfigMapper(KEY_AAC_SBR_MODE, C2_PARAMKEY_AAC_SBR_MODE, "value")
+ .limitTo(D::AUDIO & D::ENCODER & D::CONFIG)
+ .withMapper([](C2Value v) -> C2Value {
+ int32_t value;
+ if (!v.get(&value) || value < 0) {
+ return C2Config::AAC_SBR_AUTO;
+ }
+ switch (value) {
+ case 0: return C2Config::AAC_SBR_OFF;
+ case 1: return C2Config::AAC_SBR_SINGLE_RATE;
+ case 2: return C2Config::AAC_SBR_DUAL_RATE;
+ default: return C2Config::AAC_SBR_AUTO + 1; // invalid value
+ }
+ }));
+
+ add(ConfigMapper(KEY_QUALITY, C2_PARAMKEY_QUALITY, "value"));
+ add(ConfigMapper(KEY_FLAC_COMPRESSION_LEVEL, C2_PARAMKEY_COMPLEXITY, "value")
+ .limitTo(D::AUDIO & D::ENCODER));
+ add(ConfigMapper("complexity", C2_PARAMKEY_COMPLEXITY, "value")
+ .limitTo(D::ENCODER));
+
+ add(ConfigMapper(KEY_GRID_COLUMNS, C2_PARAMKEY_TILE_LAYOUT, "columns")
+ .limitTo(D::IMAGE));
+ add(ConfigMapper(KEY_GRID_ROWS, C2_PARAMKEY_TILE_LAYOUT, "rows")
+ .limitTo(D::IMAGE));
+ add(ConfigMapper(KEY_TILE_WIDTH, C2_PARAMKEY_TILE_LAYOUT, "tile.width")
+ .limitTo(D::IMAGE));
+ add(ConfigMapper(KEY_TILE_HEIGHT, C2_PARAMKEY_TILE_LAYOUT, "tile.height")
+ .limitTo(D::IMAGE));
+
+ add(ConfigMapper(KEY_LATENCY, C2_PARAMKEY_PIPELINE_DELAY_REQUEST, "value")
+ .limitTo(D::VIDEO & D::ENCODER));
+
+ add(ConfigMapper(C2_PARAMKEY_INPUT_TIME_STRETCH, C2_PARAMKEY_INPUT_TIME_STRETCH, "value"));
+
+ /* still to do
+ constexpr char KEY_PUSH_BLANK_BUFFERS_ON_STOP[] = "push-blank-buffers-on-shutdown";
+
+ not yet used by MediaCodec, but defined as MediaFormat
+ KEY_AUDIO_SESSION_ID // we use "audio-hw-sync"
+ KEY_OUTPUT_REORDER_DEPTH
+ */
+}
+
/**
 * Initializes this config object for a specific component instance.
 *
 * Determines the component's domain (audio/video/image) and kind (encoder/decoder),
 * queries the supported parameter descriptors, sets up the reflected-parameter updater,
 * registers local (framework-maintained) params, builds the standard SDK<->C2 mappings,
 * and computes the initial set of parameter indices to subscribe to.
 *
 * @param client    client used to obtain the param reflector
 * @param component the component instance being configured
 * @return OK on success; UNKNOWN_ERROR if supported params or the reflector
 *         cannot be obtained.
 */
status_t CCodecConfig::initialize(
        const std::shared_ptr<Codec2Client> &client,
        const std::shared_ptr<Codec2Client::Component> &component) {
    // Start from OTHER so we can tell below whether the query filled these in.
    C2ComponentDomainSetting domain(C2Component::DOMAIN_OTHER);
    C2ComponentKindSetting kind(C2Component::KIND_OTHER);

    std::vector<std::unique_ptr<C2Param>> queried;
    c2_status_t c2err = component->query({ &domain, &kind }, {}, C2_DONT_BLOCK, &queried);
    if (c2err != C2_OK) {
        ALOGD("Query domain & kind failed => %s", asString(c2err));
        // TEMP: determine kind from component name
        if (kind.value == C2Component::KIND_OTHER) {
            if (component->getName().find("encoder") != std::string::npos) {
                kind.value = C2Component::KIND_ENCODER;
            } else if (component->getName().find("decoder") != std::string::npos) {
                kind.value = C2Component::KIND_DECODER;
            }
        }

        // TEMP: determine domain from media type (port (preferred) or stream #0)
        if (domain.value == C2Component::DOMAIN_OTHER) {
            AString mediaType = QueryMediaType(true /* input */, component);
            if (mediaType.startsWith("audio/")) {
                domain.value = C2Component::DOMAIN_AUDIO;
            } else if (mediaType.startsWith("video/")) {
                domain.value = C2Component::DOMAIN_VIDEO;
            } else if (mediaType.startsWith("image/")) {
                domain.value = C2Component::DOMAIN_IMAGE;
            }
        }
    }

    // Fold component domain and kind into the single mDomain bitmask used by the
    // ConfigMapper domain filters.
    mDomain = (domain.value == C2Component::DOMAIN_VIDEO ? Domain::IS_VIDEO :
               domain.value == C2Component::DOMAIN_IMAGE ? Domain::IS_IMAGE :
               domain.value == C2Component::DOMAIN_AUDIO ? Domain::IS_AUDIO : Domain::OTHER_DOMAIN)
            | (kind.value == C2Component::KIND_DECODER ? Domain::IS_DECODER :
               kind.value == C2Component::KIND_ENCODER ? Domain::IS_ENCODER : Domain::OTHER_KIND);

    // Decoders consume coded data; encoders produce it.
    mInputDomain = Domain(((mDomain & IS_DECODER) ? IS_CODED : IS_RAW) | IS_INPUT);
    mOutputDomain = Domain(((mDomain & IS_ENCODER) ? IS_CODED : IS_RAW) | IS_OUTPUT);

    ALOGV("domain is %#x (%u %u)", mDomain, domain.value, kind.value);

    std::vector<C2Param::Index> paramIndices;
    // The coding media type is on the coded port: input for decoders, output for encoders.
    switch (kind.value) {
    case C2Component::KIND_DECODER:
        mCodingMediaType = QueryMediaType(true /* input */, component).c_str();
        break;
    case C2Component::KIND_ENCODER:
        mCodingMediaType = QueryMediaType(false /* input */, component).c_str();
        break;
    default:
        mCodingMediaType = "";
    }

    c2err = component->querySupportedParams(&mParamDescs);
    if (c2err != C2_OK) {
        ALOGD("Query supported params failed after returning %zu values => %s",
                mParamDescs.size(), asString(c2err));
        return UNKNOWN_ERROR;
    }
    for (const std::shared_ptr<C2ParamDescriptor> &desc : mParamDescs) {
        mSupportedIndices.emplace(desc->index());
    }

    mReflector = client->getParamReflector();
    if (mReflector == nullptr) {
        ALOGE("Failed to get param reflector");
        return UNKNOWN_ERROR;
    }

    // enumerate all fields
    mParamUpdater = std::make_shared<ReflectedParamUpdater>();
    mParamUpdater->clear();
    // temporal layering is exposed to the SDK as a whole binary param, not per-field
    mParamUpdater->supportWholeParam(
            C2_PARAMKEY_TEMPORAL_LAYERING, C2StreamTemporalLayeringTuning::CORE_INDEX);
    mParamUpdater->addParamDesc(mReflector, mParamDescs);

    // TEMP: add some standard fields even if not reflected
    if (kind.value == C2Component::KIND_ENCODER) {
        mParamUpdater->addStandardParam<C2StreamInitDataInfo::output>(C2_PARAMKEY_INIT_DATA);
    }
    if (domain.value == C2Component::DOMAIN_IMAGE || domain.value == C2Component::DOMAIN_VIDEO) {
        if (kind.value != C2Component::KIND_ENCODER) {
            // locally-maintained raw-output params for decoders (and other kinds)
            addLocalParam<C2StreamPictureSizeInfo::output>(C2_PARAMKEY_PICTURE_SIZE);
            addLocalParam<C2StreamCropRectInfo::output>(C2_PARAMKEY_CROP_RECT);
            addLocalParam(
                    new C2StreamPixelAspectRatioInfo::output(0u, 1u, 1u),
                    C2_PARAMKEY_PIXEL_ASPECT_RATIO);
            addLocalParam(new C2StreamRotationInfo::output(0u, 0), C2_PARAMKEY_ROTATION);
            addLocalParam(new C2StreamColorAspectsInfo::output(0u), C2_PARAMKEY_COLOR_ASPECTS);
            addLocalParam<C2StreamDataSpaceInfo::output>(C2_PARAMKEY_DATA_SPACE);
            addLocalParam<C2StreamHdrStaticInfo::output>(C2_PARAMKEY_HDR_STATIC_INFO);
            addLocalParam(new C2StreamSurfaceScalingInfo::output(0u, VIDEO_SCALING_MODE_SCALE_TO_FIT),
                          C2_PARAMKEY_SURFACE_SCALING_MODE);
        } else {
            addLocalParam(new C2StreamColorAspectsInfo::input(0u), C2_PARAMKEY_COLOR_ASPECTS);
        }
    }

    initializeStandardParams();

    // subscribe to all supported standard (exposed) params
    // TODO: limit this to params that are actually in the domain
    std::vector<std::string> formatKeys = mStandardParams->getPathsForDomain(Domain(1 << 30));
    std::vector<C2Param::Index> indices;
    mParamUpdater->getParamIndicesForKeys(formatKeys, &indices);
    mSubscribedIndices.insert(indices.begin(), indices.end());

    // also subscribe to some non-SDK standard parameters
    // for number of input/output buffers
    mSubscribedIndices.emplace(C2PortSuggestedBufferCountTuning::input::PARAM_TYPE);
    mSubscribedIndices.emplace(C2PortSuggestedBufferCountTuning::output::PARAM_TYPE);
    mSubscribedIndices.emplace(C2ActualPipelineDelayTuning::PARAM_TYPE);
    mSubscribedIndices.emplace(C2PortActualDelayTuning::input::PARAM_TYPE);
    mSubscribedIndices.emplace(C2PortActualDelayTuning::output::PARAM_TYPE);
    // for output buffer array allocation
    mSubscribedIndices.emplace(C2StreamMaxBufferSizeInfo::output::PARAM_TYPE);
    // init data (CSD)
    mSubscribedIndices.emplace(C2StreamInitDataInfo::output::PARAM_TYPE);

    return OK;
}
+
+status_t CCodecConfig::subscribeToConfigUpdate(
+ const std::shared_ptr<Codec2Client::Component> &component,
+ const std::vector<C2Param::Index> &indices,
+ c2_blocking_t blocking) {
+ mSubscribedIndices.insert(indices.begin(), indices.end());
+ // TODO: enable this when components no longer crash on this config
+ if (mSubscribedIndices.size() != mSubscribedIndicesSize && false) {
+ std::vector<uint32_t> indices;
+ for (C2Param::Index ix : mSubscribedIndices) {
+ indices.push_back(ix);
+ }
+ std::unique_ptr<C2SubscribedParamIndicesTuning> subscribeTuning =
+ C2SubscribedParamIndicesTuning::AllocUnique(indices);
+ std::vector<std::unique_ptr<C2SettingResult>> results;
+ c2_status_t c2Err = component->config({ subscribeTuning.get() }, blocking, &results);
+ if (c2Err != C2_OK && c2Err != C2_BAD_INDEX) {
+ ALOGD("Failed to subscribe to parameters => %s", asString(c2Err));
+ // TODO: error
+ }
+ ALOGV("Subscribed to %zu params", mSubscribedIndices.size());
+ mSubscribedIndicesSize = mSubscribedIndices.size();
+ }
+ return OK;
+}
+
+status_t CCodecConfig::queryConfiguration(
+ const std::shared_ptr<Codec2Client::Component> &component) {
+ // query all subscribed parameters
+ std::vector<C2Param::Index> indices(mSubscribedIndices.begin(), mSubscribedIndices.end());
+ std::vector<std::unique_ptr<C2Param>> queried;
+ c2_status_t c2Err = component->query({}, indices, C2_MAY_BLOCK, &queried);
+ if (c2Err != OK) {
+ ALOGI("query failed after returning %zu values (%s)", queried.size(), asString(c2Err));
+ // TODO: error
+ }
+
+ updateConfiguration(queried, ALL);
+ return OK;
+}
+
+bool CCodecConfig::updateConfiguration(
+ std::vector<std::unique_ptr<C2Param>> &configUpdate, Domain domain) {
+ ALOGV("updating configuration with %zu params", configUpdate.size());
+ bool changed = false;
+ for (std::unique_ptr<C2Param> &p : configUpdate) {
+ if (p && *p) {
+ auto insertion = mCurrentConfig.emplace(p->index(), nullptr);
+ if (insertion.second || *insertion.first->second != *p) {
+ if (mSupportedIndices.count(p->index()) || mLocalParams.count(p->index())) {
+ // only track changes in supported (reflected or local) indices
+ changed = true;
+ } else {
+ ALOGV("an unlisted config was %s: %#x",
+ insertion.second ? "added" : "updated", p->index());
+ }
+ }
+ insertion.first->second = std::move(p);
+ }
+ }
+
+ ALOGV("updated configuration has %zu params (%s)", mCurrentConfig.size(),
+ changed ? "CHANGED" : "no change");
+ if (changed) {
+ return updateFormats(domain);
+ }
+ return false;
+}
+
+bool CCodecConfig::updateFormats(Domain domain) {
+ // get addresses of params in the current config
+ std::vector<C2Param*> paramPointers;
+ for (const auto &it : mCurrentConfig) {
+ paramPointers.push_back(it.second.get());
+ }
+
+ ReflectedParamUpdater::Dict reflected = mParamUpdater->getParams(paramPointers);
+ ALOGD("c2 config is %s", reflected.debugString().c_str());
+
+ bool changed = false;
+ if (domain & mInputDomain) {
+ sp<AMessage> oldFormat = mInputFormat;
+ mInputFormat = mInputFormat->dup(); // trigger format changed
+ mInputFormat->extend(getSdkFormatForDomain(reflected, mInputDomain));
+ if (mInputFormat->countEntries() != oldFormat->countEntries()
+ || mInputFormat->changesFrom(oldFormat)->countEntries() > 0) {
+ changed = true;
+ } else {
+ mInputFormat = oldFormat; // no change
+ }
+ }
+ if (domain & mOutputDomain) {
+ sp<AMessage> oldFormat = mOutputFormat;
+ mOutputFormat = mOutputFormat->dup(); // trigger output format changed
+ mOutputFormat->extend(getSdkFormatForDomain(reflected, mOutputDomain));
+ if (mOutputFormat->countEntries() != oldFormat->countEntries()
+ || mOutputFormat->changesFrom(oldFormat)->countEntries() > 0) {
+ changed = true;
+ } else {
+ mOutputFormat = oldFormat; // no change
+ }
+ }
+ ALOGV_IF(changed, "format(s) changed");
+ return changed;
+}
+
+sp<AMessage> CCodecConfig::getSdkFormatForDomain(
+ const ReflectedParamUpdater::Dict &reflected, Domain portDomain) const {
+ sp<AMessage> msg = new AMessage;
+ for (const std::pair<std::string, std::vector<ConfigMapper>> &el : mStandardParams->getKeys()) {
+ for (const ConfigMapper &cm : el.second) {
+ if ((cm.domain() & portDomain) == 0 // input-output-coded-raw
+ || (cm.domain() & mDomain) != mDomain // component domain + kind (these must match)
+ || (cm.domain() & IS_READ) == 0) {
+ continue;
+ }
+ auto it = reflected.find(cm.path());
+ if (it == reflected.end()) {
+ continue;
+ }
+ C2Value c2Value;
+ sp<ABuffer> bufValue;
+ AString strValue;
+ AMessage::ItemData item;
+ if (it->second.find(&c2Value)) {
+ item = cm.mapToMessage(c2Value);
+ } else if (it->second.find(&bufValue)) {
+ item.set(bufValue);
+ } else if (it->second.find(&strValue)) {
+ item.set(strValue);
+ } else {
+ ALOGD("unexpected untyped query value for key: %s", cm.path().c_str());
+ continue;
+ }
+ msg->setItem(el.first.c_str(), item);
+ }
+ }
+
+ { // convert from Codec 2.0 rect to MediaFormat rect and add crop rect if not present
+ int32_t left, top, width, height;
+ if (msg->findInt32("crop-left", &left) && msg->findInt32("crop-width", &width)
+ && msg->findInt32("crop-top", &top) && msg->findInt32("crop-height", &height)
+ && left >= 0 && width >=0 && width <= INT32_MAX - left
+ && top >= 0 && height >=0 && height <= INT32_MAX - top) {
+ msg->removeEntryAt(msg->findEntryByName("crop-left"));
+ msg->removeEntryAt(msg->findEntryByName("crop-top"));
+ msg->removeEntryAt(msg->findEntryByName("crop-width"));
+ msg->removeEntryAt(msg->findEntryByName("crop-height"));
+ msg->setRect("crop", left, top, left + width - 1, top + height - 1);
+ } else if (msg->findInt32("width", &width) && msg->findInt32("height", &height)) {
+ msg->setRect("crop", 0, 0, width - 1, height - 1);
+ }
+ }
+
+ { // convert temporal layering to schema
+ sp<ABuffer> tmp;
+ if (msg->findBuffer(C2_PARAMKEY_TEMPORAL_LAYERING, &tmp) && tmp != nullptr) {
+ C2StreamTemporalLayeringTuning *layering =
+ C2StreamTemporalLayeringTuning::From(C2Param::From(tmp->data(), tmp->size()));
+ if (layering && layering->m.layerCount > 0
+ && layering->m.bLayerCount < layering->m.layerCount) {
+ // check if this is webrtc compatible
+ AString mime;
+ if (msg->findString(KEY_MIME, &mime) &&
+ mime.equalsIgnoreCase(MIMETYPE_VIDEO_VP8) &&
+ layering->m.bLayerCount == 0 &&
+ (layering->m.layerCount == 1
+ || (layering->m.layerCount == 2
+ && layering->flexCount() >= 1
+ && layering->m.bitrateRatios[0] == .6f)
+ || (layering->m.layerCount == 3
+ && layering->flexCount() >= 2
+ && layering->m.bitrateRatios[0] == .4f
+ && layering->m.bitrateRatios[1] == .6f)
+ || (layering->m.layerCount == 4
+ && layering->flexCount() >= 3
+ && layering->m.bitrateRatios[0] == .25f
+ && layering->m.bitrateRatios[1] == .4f
+ && layering->m.bitrateRatios[2] == .6f))) {
+ msg->setString(KEY_TEMPORAL_LAYERING, AStringPrintf(
+ "webrtc.vp8.%u-layer", layering->m.layerCount));
+ } else if (layering->m.bLayerCount) {
+ msg->setString(KEY_TEMPORAL_LAYERING, AStringPrintf(
+ "android.generic.%u+%u",
+ layering->m.layerCount - layering->m.bLayerCount,
+ layering->m.bLayerCount));
+ } else if (layering->m.bLayerCount) {
+ msg->setString(KEY_TEMPORAL_LAYERING, AStringPrintf(
+ "android.generic.%u", layering->m.layerCount));
+ }
+ }
+ msg->removeEntryAt(msg->findEntryByName(C2_PARAMKEY_TEMPORAL_LAYERING));
+ }
+ }
+
+ { // convert color info
+ C2Color::primaries_t primaries;
+ C2Color::matrix_t matrix;
+ if (msg->findInt32("color-primaries", (int32_t*)&primaries)
+ && msg->findInt32("color-matrix", (int32_t*)&matrix)) {
+ int32_t standard;
+
+ if (C2Mapper::map(primaries, matrix, &standard)) {
+ msg->setInt32(KEY_COLOR_STANDARD, standard);
+ }
+
+ msg->removeEntryAt(msg->findEntryByName("color-primaries"));
+ msg->removeEntryAt(msg->findEntryByName("color-matrix"));
+ }
+
+
+ // calculate dataspace for raw graphic buffers if not specified by component, or if
+ // using surface with unspecified aspects (as those must be defaulted which may change
+ // the dataspace)
+ if ((portDomain & IS_RAW) && (mDomain & (IS_IMAGE | IS_VIDEO))) {
+ android_dataspace dataspace;
+ ColorAspects aspects = {
+ ColorAspects::RangeUnspecified, ColorAspects::PrimariesUnspecified,
+ ColorAspects::TransferUnspecified, ColorAspects::MatrixUnspecified
+ };
+ ColorUtils::getColorAspectsFromFormat(msg, aspects);
+ ColorAspects origAspects = aspects;
+ if (mUsingSurface) {
+ // get image size (default to HD)
+ int32_t width = 1280;
+ int32_t height = 720;
+ int32_t left, top, right, bottom;
+ if (msg->findRect("crop", &left, &top, &right, &bottom)) {
+ width = right - left + 1;
+ height = bottom - top + 1;
+ } else {
+ (void)msg->findInt32(KEY_WIDTH, &width);
+ (void)msg->findInt32(KEY_HEIGHT, &height);
+ }
+ ColorUtils::setDefaultCodecColorAspectsIfNeeded(aspects, width, height);
+ ColorUtils::setColorAspectsIntoFormat(aspects, msg);
+ }
+
+ if (!msg->findInt32("android._dataspace", (int32_t*)&dataspace)
+ || aspects.mRange != origAspects.mRange
+ || aspects.mPrimaries != origAspects.mPrimaries
+ || aspects.mTransfer != origAspects.mTransfer
+ || aspects.mMatrixCoeffs != origAspects.mMatrixCoeffs) {
+ dataspace = ColorUtils::getDataSpaceForColorAspects(aspects, true /* mayExpand */);
+ msg->setInt32("android._dataspace", dataspace);
+ }
+ }
+
+ // HDR static info
+
+ C2HdrStaticMetadataStruct hdr;
+ if (msg->findFloat("smpte2086.red.x", &hdr.mastering.red.x)
+ && msg->findFloat("smpte2086.red.y", &hdr.mastering.red.y)
+ && msg->findFloat("smpte2086.green.x", &hdr.mastering.green.x)
+ && msg->findFloat("smpte2086.green.y", &hdr.mastering.green.y)
+ && msg->findFloat("smpte2086.blue.x", &hdr.mastering.blue.x)
+ && msg->findFloat("smpte2086.blue.y", &hdr.mastering.blue.y)
+ && msg->findFloat("smpte2086.white.x", &hdr.mastering.white.x)
+ && msg->findFloat("smpte2086.white.y", &hdr.mastering.white.y)
+ && msg->findFloat("smpte2086.max-luminance", &hdr.mastering.maxLuminance)
+ && msg->findFloat("smpte2086.min-luminance", &hdr.mastering.minLuminance)
+ && msg->findFloat("cta861.max-cll", &hdr.maxCll)
+ && msg->findFloat("cta861.max-fall", &hdr.maxFall)) {
+ if (hdr.mastering.red.x >= 0 && hdr.mastering.red.x <= 1
+ && hdr.mastering.red.y >= 0 && hdr.mastering.red.y <= 1
+ && hdr.mastering.green.x >= 0 && hdr.mastering.green.x <= 1
+ && hdr.mastering.green.y >= 0 && hdr.mastering.green.y <= 1
+ && hdr.mastering.blue.x >= 0 && hdr.mastering.blue.x <= 1
+ && hdr.mastering.blue.y >= 0 && hdr.mastering.blue.y <= 1
+ && hdr.mastering.white.x >= 0 && hdr.mastering.white.x <= 1
+ && hdr.mastering.white.y >= 0 && hdr.mastering.white.y <= 1
+ && hdr.mastering.maxLuminance >= 0 && hdr.mastering.maxLuminance <= 65535
+ && hdr.mastering.minLuminance >= 0 && hdr.mastering.minLuminance <= 6.5535
+ && hdr.maxCll >= 0 && hdr.maxCll <= 65535
+ && hdr.maxFall >= 0 && hdr.maxFall <= 65535) {
+ HDRStaticInfo meta;
+ meta.mID = meta.kType1;
+ meta.sType1.mR.x = hdr.mastering.red.x / 0.00002 + 0.5;
+ meta.sType1.mR.y = hdr.mastering.red.y / 0.00002 + 0.5;
+ meta.sType1.mG.x = hdr.mastering.green.x / 0.00002 + 0.5;
+ meta.sType1.mG.y = hdr.mastering.green.y / 0.00002 + 0.5;
+ meta.sType1.mB.x = hdr.mastering.blue.x / 0.00002 + 0.5;
+ meta.sType1.mB.y = hdr.mastering.blue.y / 0.00002 + 0.5;
+ meta.sType1.mW.x = hdr.mastering.white.x / 0.00002 + 0.5;
+ meta.sType1.mW.y = hdr.mastering.white.y / 0.00002 + 0.5;
+ meta.sType1.mMaxDisplayLuminance = hdr.mastering.maxLuminance + 0.5;
+ meta.sType1.mMinDisplayLuminance = hdr.mastering.minLuminance / 0.0001 + 0.5;
+ meta.sType1.mMaxContentLightLevel = hdr.maxCll + 0.5;
+ meta.sType1.mMaxFrameAverageLightLevel = hdr.maxFall + 0.5;
+ msg->removeEntryAt(msg->findEntryByName("smpte2086.red.x"));
+ msg->removeEntryAt(msg->findEntryByName("smpte2086.red.y"));
+ msg->removeEntryAt(msg->findEntryByName("smpte2086.green.x"));
+ msg->removeEntryAt(msg->findEntryByName("smpte2086.green.y"));
+ msg->removeEntryAt(msg->findEntryByName("smpte2086.blue.x"));
+ msg->removeEntryAt(msg->findEntryByName("smpte2086.blue.y"));
+ msg->removeEntryAt(msg->findEntryByName("smpte2086.white.x"));
+ msg->removeEntryAt(msg->findEntryByName("smpte2086.white.y"));
+ msg->removeEntryAt(msg->findEntryByName("smpte2086.max-luminance"));
+ msg->removeEntryAt(msg->findEntryByName("smpte2086.min-luminance"));
+ msg->removeEntryAt(msg->findEntryByName("cta861.max-cll"));
+ msg->removeEntryAt(msg->findEntryByName("cta861.max-fall"));
+ msg->setBuffer(KEY_HDR_STATIC_INFO, ABuffer::CreateAsCopy(&meta, sizeof(meta)));
+ } else {
+ ALOGD("found invalid HDR static metadata %s", msg->debugString(8).c_str());
+ }
+ }
+ }
+
+ ALOGV("converted to SDK values as %s", msg->debugString().c_str());
+ return msg;
+}
+
+/// converts an AMessage value to a ParamUpdater value
+static void convert(const AMessage::ItemData &from, ReflectedParamUpdater::Value *to) {
+ int32_t int32Value;
+ int64_t int64Value;
+ sp<ABuffer> bufValue;
+ AString strValue;
+ float floatValue;
+ double doubleValue;
+
+ if (from.find(&int32Value)) {
+ to->set(int32Value);
+ } else if (from.find(&int64Value)) {
+ to->set(int64Value);
+ } else if (from.find(&bufValue)) {
+ to->set(bufValue);
+ } else if (from.find(&strValue)) {
+ to->set(strValue);
+ } else if (from.find(&floatValue)) {
+ to->set(C2Value(floatValue));
+ } else if (from.find(&doubleValue)) {
+ // convert double to float
+ to->set(C2Value((float)doubleValue));
+ }
+ // ignore all other AMessage types
+}
+
+/// relaxes Codec 2.0 specific value types to SDK types (mainly removes signedness and counterness
+/// from 32/64-bit values.)
+static void relaxValues(ReflectedParamUpdater::Value &item) {
+ C2Value c2Value;
+ int32_t int32Value;
+ int64_t int64Value;
+ (void)item.find(&c2Value);
+ if (c2Value.get(&int32Value) || c2Value.get((uint32_t*)&int32Value)
+ || c2Value.get((c2_cntr32_t*)&int32Value)) {
+ item.set(int32Value);
+ } else if (c2Value.get(&int64Value)
+ || c2Value.get((uint64_t*)&int64Value)
+ || c2Value.get((c2_cntr64_t*)&int64Value)) {
+ item.set(int64Value);
+ }
+}
+
+ReflectedParamUpdater::Dict CCodecConfig::getReflectedFormat(
+ const sp<AMessage> ¶ms_, Domain configDomain) const {
+ // create a modifiable copy of params
+ sp<AMessage> params = params_->dup();
+ ALOGV("filtering with config domain %x", configDomain);
+
+ // convert some macro parameters to Codec 2.0 specific expressions
+
+ { // make i-frame-interval frame based
+ float iFrameInterval;
+ if (params->findAsFloat(KEY_I_FRAME_INTERVAL, &iFrameInterval)) {
+ float frameRate;
+ if (params->findAsFloat(KEY_FRAME_RATE, &frameRate)) {
+ params->setInt32("i-frame-period",
+ (frameRate <= 0 || iFrameInterval < 0)
+ ? -1 /* no sync frames */
+ : (int32_t)c2_min(iFrameInterval * frameRate + 0.5,
+ (float)INT32_MAX));
+ }
+ }
+ }
+
+ if (mDomain == (IS_VIDEO | IS_ENCODER)) {
+ // convert capture-rate into input-time-stretch
+ float frameRate, captureRate;
+ if (params->findAsFloat(KEY_FRAME_RATE, &frameRate)) {
+ if (!params->findAsFloat("time-lapse-fps", &captureRate)
+ && !params->findAsFloat(KEY_CAPTURE_RATE, &captureRate)) {
+ captureRate = frameRate;
+ }
+ if (captureRate > 0 && frameRate > 0) {
+ params->setFloat(C2_PARAMKEY_INPUT_TIME_STRETCH, captureRate / frameRate);
+ }
+ }
+ }
+
+ { // reflect temporal layering into a binary blob
+ AString schema;
+ if (params->findString(KEY_TEMPORAL_LAYERING, &schema)) {
+ unsigned int numLayers = 0;
+ unsigned int numBLayers = 0;
+ int tags;
+ char dummy;
+ std::unique_ptr<C2StreamTemporalLayeringTuning::output> layering;
+ if (sscanf(schema.c_str(), "webrtc.vp8.%u-layer%c", &numLayers, &dummy) == 1
+ && numLayers > 0) {
+ switch (numLayers) {
+ case 1:
+ layering = C2StreamTemporalLayeringTuning::output::AllocUnique(
+ {}, 0u, 1u, 0u);
+ break;
+ case 2:
+ layering = C2StreamTemporalLayeringTuning::output::AllocUnique(
+ { .6f }, 0u, 2u, 0u);
+ break;
+ case 3:
+ layering = C2StreamTemporalLayeringTuning::output::AllocUnique(
+ { .4f, .6f }, 0u, 3u, 0u);
+ break;
+ default:
+ layering = C2StreamTemporalLayeringTuning::output::AllocUnique(
+ { .25f, .4f, .6f }, 0u, 4u, 0u);
+ break;
+ }
+ } else if ((tags = sscanf(schema.c_str(), "android.generic.%u%c%u%c",
+ &numLayers, &dummy, &numBLayers, &dummy))
+ && (tags == 1 || (tags == 3 && dummy == '+'))
+ && numLayers > 0 && numLayers < UINT32_MAX - numBLayers) {
+ layering = C2StreamTemporalLayeringTuning::output::AllocUnique(
+ {}, 0u, numLayers + numBLayers, numBLayers);
+ } else {
+ ALOGD("Ignoring unsupported ts-schema [%s]", schema.c_str());
+ }
+ if (layering) {
+ params->setBuffer(C2_PARAMKEY_TEMPORAL_LAYERING,
+ ABuffer::CreateAsCopy(layering.get(), layering->size()));
+ }
+ }
+ }
+
+ { // convert from MediaFormat rect to Codec 2.0 rect
+ int32_t offset;
+ int32_t end;
+ AMessage::ItemData item;
+ if (params->findInt32("crop-left", &offset) && params->findInt32("crop-right", &end)
+ && offset >= 0 && end >= offset - 1) {
+ size_t ix = params->findEntryByName("crop-right");
+ params->setEntryNameAt(ix, "crop-width");
+ item.set(end - offset + 1);
+ params->setEntryAt(ix, item);
+ }
+ if (params->findInt32("crop-top", &offset) && params->findInt32("crop-bottom", &end)
+ && offset >= 0 && end >= offset - 1) {
+ size_t ix = params->findEntryByName("crop-bottom");
+ params->setEntryNameAt(ix, "crop-height");
+ item.set(end - offset + 1);
+ params->setEntryAt(ix, item);
+ }
+ }
+
+ { // convert color info
+ int32_t standard;
+ if (params->findInt32(KEY_COLOR_STANDARD, &standard)) {
+ C2Color::primaries_t primaries;
+ C2Color::matrix_t matrix;
+
+ if (C2Mapper::map(standard, &primaries, &matrix)) {
+ params->setInt32("color-primaries", primaries);
+ params->setInt32("color-matrix", matrix);
+ }
+ }
+
+ sp<ABuffer> hdrMeta;
+ if (params->findBuffer(KEY_HDR_STATIC_INFO, &hdrMeta)
+ && hdrMeta->size() == sizeof(HDRStaticInfo)) {
+ HDRStaticInfo *meta = (HDRStaticInfo*)hdrMeta->data();
+ if (meta->mID == meta->kType1) {
+ params->setFloat("smpte2086.red.x", meta->sType1.mR.x * 0.00002);
+ params->setFloat("smpte2086.red.y", meta->sType1.mR.y * 0.00002);
+ params->setFloat("smpte2086.green.x", meta->sType1.mG.x * 0.00002);
+ params->setFloat("smpte2086.green.y", meta->sType1.mG.y * 0.00002);
+ params->setFloat("smpte2086.blue.x", meta->sType1.mB.x * 0.00002);
+ params->setFloat("smpte2086.blue.y", meta->sType1.mB.y * 0.00002);
+ params->setFloat("smpte2086.white.x", meta->sType1.mW.x * 0.00002);
+ params->setFloat("smpte2086.white.y", meta->sType1.mW.y * 0.00002);
+ params->setFloat("smpte2086.max-luminance", meta->sType1.mMaxDisplayLuminance);
+ params->setFloat("smpte2086.min-luminance", meta->sType1.mMinDisplayLuminance * 0.0001);
+ params->setFloat("cta861.max-cll", meta->sType1.mMaxContentLightLevel);
+ params->setFloat("cta861.max-fall", meta->sType1.mMaxFrameAverageLightLevel);
+ }
+ }
+ }
+
+ // this is to verify that we set proper signedness for standard parameters
+ bool beVeryStrict = property_get_bool("debug.stagefright.ccodec_strict_type", false);
+ // this is to allow vendors to use the wrong signedness for standard parameters
+ bool beVeryLax = property_get_bool("debug.stagefright.ccodec_lax_type", false);
+
+ ReflectedParamUpdater::Dict filtered;
+ for (size_t ix = 0; ix < params->countEntries(); ++ix) {
+ AMessage::Type type;
+ AString name = params->getEntryNameAt(ix, &type);
+ AMessage::ItemData msgItem = params->getEntryAt(ix);
+ ReflectedParamUpdater::Value item;
+ convert(msgItem, &item); // convert item to param updater item
+
+ if (name.startsWith("vendor.")) {
+ // vendor params pass through as is
+ filtered.emplace(name.c_str(), item);
+ continue;
+ }
+ // standard parameters may get modified, filtered or duplicated
+ for (const ConfigMapper &cm : mStandardParams->getConfigMappersForSdkKey(name.c_str())) {
+ // note: we ignore port domain for configuration
+ if ((cm.domain() & configDomain)
+ // component domain + kind (these must match)
+ && (cm.domain() & mDomain) == mDomain) {
+ // map arithmetic values, pass through string or buffer
+ switch (type) {
+ case AMessage::kTypeBuffer:
+ case AMessage::kTypeString:
+ break;
+ case AMessage::kTypeInt32:
+ case AMessage::kTypeInt64:
+ case AMessage::kTypeFloat:
+ case AMessage::kTypeDouble:
+ // for now only map settings with mappers as we are not creating
+ // signed <=> unsigned mappers
+ // TODO: be precise about signed unsigned
+ if (beVeryStrict || cm.mapper()) {
+ item.set(cm.mapFromMessage(params->getEntryAt(ix)));
+ // also allow to relax type strictness
+ if (beVeryLax) {
+ relaxValues(item);
+ }
+ }
+ break;
+ default:
+ continue;
+ }
+ filtered.emplace(cm.path(), item);
+ }
+ }
+ }
+ ALOGV("filtered %s to %s", params->debugString(4).c_str(),
+ filtered.debugString(4).c_str());
+ return filtered;
+}
+
+status_t CCodecConfig::getConfigUpdateFromSdkParams(
+        std::shared_ptr<Codec2Client::Component> component,
+        const sp<AMessage> &sdkParams, Domain configDomain,
+        c2_blocking_t blocking,
+        std::vector<std::unique_ptr<C2Param>> *configUpdate) const {
+    // Translates SDK (MediaFormat-style) keys in |sdkParams| into reflected
+    // Codec 2.0 params, then resolves them to concrete C2Param objects:
+    // component-supported indices are queried from the component, while
+    // locally-maintained params are copied from the current configuration.
+    ReflectedParamUpdater::Dict params = getReflectedFormat(sdkParams, configDomain);
+
+    std::vector<C2Param::Index> indices;
+    mParamUpdater->getParamIndicesFromMessage(params, &indices);
+    if (indices.empty()) {
+        // nothing maps to a known param; not an error
+        ALOGD("no recognized params in: %s", params.debugString().c_str());
+        return OK;
+    }
+
+    configUpdate->clear();
+    std::vector<C2Param::Index> supportedIndices;
+    for (C2Param::Index ix : indices) {
+        if (mSupportedIndices.count(ix)) {
+            supportedIndices.push_back(ix);
+        } else if (mLocalParams.count(ix)) {
+            // query local parameter here (copied from the current config)
+            auto it = mCurrentConfig.find(ix);
+            if (it != mCurrentConfig.end()) {
+                configUpdate->emplace_back(C2Param::Copy(*it->second));
+            }
+        }
+    }
+
+    // NOTE(review): this relies on query() appending to |configUpdate| rather
+    // than replacing the local-param copies added above -- confirm against
+    // Codec2Client::Component::query semantics.
+    c2_status_t err = component->query({ }, supportedIndices, blocking, configUpdate);
+    if (err != C2_OK) {
+        // partial results are still used; failure here is non-fatal
+        ALOGD("query failed after returning %zu params => %s", configUpdate->size(), asString(err));
+    }
+
+    if (configUpdate->size()) {
+        // overwrite the queried values with the SDK-requested ones
+        mParamUpdater->updateParamsFromMessage(params, configUpdate);
+    }
+    return OK;
+}
+
+status_t CCodecConfig::setParameters(
+        std::shared_ptr<Codec2Client::Component> component,
+        std::vector<std::unique_ptr<C2Param>> &configUpdate,
+        c2_blocking_t blocking) {
+    // Applies |configUpdate| by routing each param either to the component
+    // (supported indices) or to the locally-maintained config (local params,
+    // validated first). Returns BAD_VALUE if any param was rejected; a
+    // component-side config() failure by itself is non-fatal.
+    status_t result = OK;
+    if (configUpdate.empty()) {
+        return OK;
+    }
+
+    std::vector<C2Param::Index> indices;
+    std::vector<C2Param *> paramVector;
+    for (const std::unique_ptr<C2Param> &param : configUpdate) {
+        if (mSupportedIndices.count(param->index())) {
+            // component parameter
+            paramVector.push_back(param.get());
+            indices.push_back(param->index());
+        } else if (mLocalParams.count(param->index())) {
+            // handle local parameter here: validate a copy, then store it
+            LocalParamValidator validator = mLocalParams.find(param->index())->second;
+            c2_status_t err = C2_OK;
+            std::unique_ptr<C2Param> copy = C2Param::Copy(*param);
+            if (validator) {
+                err = validator(copy);
+            }
+            if (err == C2_OK) {
+                ALOGV("updated local parameter value for %s",
+                        mParamUpdater->getParamName(param->index()).c_str());
+
+                mCurrentConfig[param->index()] = std::move(copy);
+            } else {
+                ALOGD("failed to set parameter value for %s => %s",
+                        mParamUpdater->getParamName(param->index()).c_str(), asString(err));
+                result = BAD_VALUE;
+            }
+        }
+    }
+    // update subscribed param indices
+    subscribeToConfigUpdate(component, indices, blocking);
+
+    std::vector<std::unique_ptr<C2SettingResult>> failures;
+    c2_status_t err = component->config(paramVector, blocking, &failures);
+    if (err != C2_OK) {
+        ALOGD("config failed => %s", asString(err));
+        // This is non-fatal.
+    }
+    // per-setting failures are inspected individually; only BAD_VALUE is
+    // surfaced to the caller
+    for (const std::unique_ptr<C2SettingResult> &failure : failures) {
+        switch (failure->failure) {
+            case C2SettingResult::BAD_VALUE:
+                ALOGD("Bad parameter value");
+                result = BAD_VALUE;
+                break;
+            default:
+                ALOGV("failure = %d", int(failure->failure));
+                break;
+        }
+    }
+
+    // Re-query parameter values in case config could not update them and update the current
+    // configuration.
+    // NOTE(review): this clears the caller's |configUpdate| and refills it with
+    // the component's view of |indices|; local params handled above are not
+    // re-added -- confirm callers do not rely on them remaining in the vector.
+    configUpdate.clear();
+    err = component->query({}, indices, blocking, &configUpdate);
+    if (err != C2_OK) {
+        ALOGD("query failed after returning %zu params => %s", configUpdate.size(), asString(err));
+    }
+    (void)updateConfiguration(configUpdate, ALL);
+
+    // TODO: error value
+    return result;
+}
+
+const C2Param *CCodecConfig::getConfigParameterValue(C2Param::Index index) const {
+    // Looks up |index| in the current configuration; returns nullptr when the
+    // parameter is not part of it. The returned pointer is owned by
+    // mCurrentConfig and is only valid until the configuration is next updated.
+    auto it = mCurrentConfig.find(index);
+    if (it == mCurrentConfig.end()) {
+        return nullptr;
+    } else {
+        return it->second.get();
+    }
+}
+
+} // namespace android
diff --git a/media/codec2/sfplugin/CCodecConfig.h b/media/codec2/sfplugin/CCodecConfig.h
new file mode 100644
index 0000000..3bafe3f
--- /dev/null
+++ b/media/codec2/sfplugin/CCodecConfig.h
@@ -0,0 +1,345 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef C_CODEC_CONFIG_H_
+#define C_CODEC_CONFIG_H_
+
+#include <map>
+#include <memory>
+#include <set>
+#include <vector>
+
+#include <C2Component.h>
+#include <codec2/hidl/client.h>
+
+#include <utils/RefBase.h>
+
+#include "InputSurfaceWrapper.h"
+#include "ReflectedParamUpdater.h"
+
+namespace android {
+
+struct AMessage;
+struct StandardParams;
+
+/**
+ * Struct managing the codec configuration for CCodec.
+ */
+struct CCodecConfig {
+
+    /**
+     * Domain consists of a bitmask divided into fields, and specifiers work by excluding other
+     * values in those domains.
+     *
+     * Component domains are composed by or-ing the individual IS_ constants, e.g.
+     * IS_DECODER | IS_AUDIO.
+     *
+     * Config specifiers are composed by or-ing the individual mask constants, and
+     * and-ing these groups: e.g. (DECODER | ENCODER) & AUDIO.
+     *
+     * The naming of these constants was to limit the length of mask names as these are used more
+     * commonly as masks.
+     */
+    enum Domain : uint32_t {
+        // component domain (domain & kind)
+        GUARD_BIT    = (1 << 1),   ///< this is to prevent against accidental && or || usage
+        IS_AUDIO     = (1 << 2),   ///< for audio codecs
+        IS_VIDEO     = (1 << 3),   ///< for video codecs
+        IS_IMAGE     = (1 << 4),   ///< for image codecs
+        OTHER_DOMAIN = (1 << 5),   ///< for other domains
+
+        IS_ENCODER   = (1 << 6),   ///< for encoders
+        IS_DECODER   = (1 << 7),   ///< for decoders
+        OTHER_KIND   = (1 << 8),   ///< for other kinds (neither encoder nor decoder)
+
+        // config domain
+        IS_PARAM     = (1 << 9),   ///< for setParameter
+        IS_CONFIG    = (1 << 10),  ///< for configure
+        IS_READ      = (1 << 11),  ///< for getFormat
+
+        // port domain
+        IS_INPUT     = (1 << 12),  ///< for input port (getFormat)
+        IS_OUTPUT    = (1 << 13),  ///< for output port (getFormat)
+        IS_RAW       = (1 << 14),  ///< for raw port (input-encoder, output-decoder)
+        IS_CODED     = (1 << 15),  ///< for coded port (input-decoder, output-encoder)
+
+        ALL = ~0U,
+        NONE = 0,
+
+        // single-value specifiers: each keeps every bit except the other
+        // values of its own field, so and-ing them narrows one field at a time
+        AUDIO   = ~(IS_IMAGE | IS_VIDEO | OTHER_DOMAIN),
+        VIDEO   = ~(IS_AUDIO | IS_IMAGE | OTHER_DOMAIN),
+        IMAGE   = ~(IS_AUDIO | IS_VIDEO | OTHER_DOMAIN),
+
+        DECODER = ~(IS_ENCODER | OTHER_KIND),
+        ENCODER = ~(IS_DECODER | OTHER_KIND),
+
+        PARAM   = ~(IS_CONFIG | IS_READ),
+        CONFIG  = ~(IS_PARAM | IS_READ),
+        READ    = ~(IS_CONFIG | IS_PARAM),
+
+        INPUT   = ~(IS_OUTPUT | IS_RAW | IS_CODED),
+        OUTPUT  = ~(IS_INPUT | IS_RAW | IS_CODED),
+        RAW     = ~(IS_INPUT | IS_OUTPUT | IS_CODED),
+        CODED   = ~(IS_INPUT | IS_RAW | IS_OUTPUT),
+    };
+
+    // things required to manage formats
+    std::vector<std::shared_ptr<C2ParamDescriptor>> mParamDescs;
+    std::shared_ptr<C2ParamReflector> mReflector;
+
+    // maps SDK names to/from C2Param objects via reflection
+    std::shared_ptr<ReflectedParamUpdater> mParamUpdater;
+
+    Domain mDomain;                 // component domain
+    Domain mInputDomain;            // input port domain
+    Domain mOutputDomain;           // output port domain
+    std::string mCodingMediaType;   // media type of the coded stream
+
+    // standard MediaCodec to Codec 2.0 params mapping
+    std::shared_ptr<StandardParams> mStandardParams;
+
+    std::set<C2Param::Index> mSupportedIndices; ///< indices supported by the component
+    std::set<C2Param::Index> mSubscribedIndices; ///< indices to subscribe to
+    size_t mSubscribedIndicesSize; ///< count of currently subscribed indices
+
+    // SDK-facing input/output formats derived from the current configuration
+    sp<AMessage> mInputFormat;
+    sp<AMessage> mOutputFormat;
+
+    bool mUsingSurface; ///< using input or output surface
+
+    std::shared_ptr<InputSurfaceWrapper> mInputSurface;
+    std::unique_ptr<InputSurfaceWrapper::Config> mISConfig;
+
+    /// the current configuration. Updated after configure() and based on configUpdate in
+    /// onWorkDone
+    std::map<C2Param::Index, std::unique_ptr<C2Param>> mCurrentConfig;
+
+    typedef std::function<c2_status_t(std::unique_ptr<C2Param>&)> LocalParamValidator;
+
+    /// Parameter indices tracked in current config that are not supported by the component.
+    /// these are provided so that optional parameters can remain in the current configuration.
+    /// as such, these parameters have no dependencies. TODO: use C2InterfaceHelper for this.
+    /// For now support a validation function.
+    std::map<C2Param::Index, LocalParamValidator> mLocalParams;
+
+    CCodecConfig();
+
+    /// initializes the members required to manage the format: descriptors, reflector,
+    /// reflected param helper, domain, standard params, and subscribes to standard
+    /// indices.
+    status_t initialize(
+            const std::shared_ptr<Codec2Client> &client,
+            const std::shared_ptr<Codec2Client::Component> &component);
+
+
+    /**
+     * Adds a locally maintained parameter. This is used for output configuration that can be
+     * appended to the output buffers in case it is not supported by the component.
+     */
+    template<typename T>
+    bool addLocalParam(
+            const std::string &name,
+            C2ParamDescriptor::attrib_t attrib = C2ParamDescriptor::IS_READ_ONLY,
+            std::function<c2_status_t(std::unique_ptr<T>&)> validator_ =
+                std::function<c2_status_t(std::unique_ptr<T>&)>()) {
+        C2Param::Index index = T::PARAM_TYPE;
+        if (mSupportedIndices.count(index) || mLocalParams.count(index)) {
+            if (mSupportedIndices.count(index)) {
+                // the component supports it natively; just make sure we subscribe
+                mSubscribedIndices.emplace(index);
+            }
+            ALOGD("ignoring local param %s (%#x) as it is already %s",
+                    name.c_str(), (uint32_t)index, mSupportedIndices.count(index) ? "supported" : "local");
+            return false; // already supported by the component or already added
+        }
+
+        // wrap typed validator into untyped validator
+        LocalParamValidator validator;
+        if (validator_) {
+            validator = [validator_](std::unique_ptr<C2Param>& p){
+                c2_status_t res = C2_BAD_VALUE;
+                std::unique_ptr<T> typed(static_cast<T*>(p.release()));
+                // if parameter is correctly typed
+                // (on a type mismatch |p| is left null; callers must check the result)
+                if (T::From(typed.get())) {
+                    res = validator_(typed);
+                    p.reset(typed.release());
+                }
+                return res;
+            };
+        }
+
+        mLocalParams.emplace(index, validator);
+        mParamUpdater->addStandardParam<T>(name, attrib);
+        return true;
+    }
+
+    /**
+     * Adds a locally maintained parameter with a default value.
+     */
+    template<typename T>
+    bool addLocalParam(
+            std::unique_ptr<T> default_,
+            const std::string &name,
+            C2ParamDescriptor::attrib_t attrib = C2ParamDescriptor::IS_READ_ONLY,
+            std::function<c2_status_t(std::unique_ptr<T>&)> validator_ =
+                std::function<c2_status_t(std::unique_ptr<T>&)>()) {
+        if (addLocalParam<T>(name, attrib, validator_)) {
+            if (validator_) {
+                // the default must itself pass validation
+                c2_status_t err = validator_(default_);
+                if (err != C2_OK) {
+                    ALOGD("default value for %s is invalid => %s", name.c_str(), asString(err));
+                    return false;
+                }
+            }
+            mCurrentConfig[T::PARAM_TYPE] = std::move(default_);
+            return true;
+        }
+        return false;
+    }
+
+    /// Same as above, but takes ownership of a raw-pointer default value.
+    template<typename T>
+    bool addLocalParam(
+            T *default_, const std::string &name,
+            C2ParamDescriptor::attrib_t attrib = C2ParamDescriptor::IS_READ_ONLY,
+            std::function<c2_status_t(std::unique_ptr<T>&)> validator_ =
+                std::function<c2_status_t(std::unique_ptr<T>&)>()) {
+        return addLocalParam(std::unique_ptr<T>(default_), name, attrib, validator_);
+    }
+
+    /// Applies configuration updates, and updates format in the specific domain.
+    /// Returns true if formats were updated
+    /// \param domain input/output bitmask
+    bool updateConfiguration(
+            std::vector<std::unique_ptr<C2Param>> &configUpdate, Domain domain);
+
+    /// Updates formats in the specific domain. Returns true if any of the formats have changed.
+    /// \param domain input/output bitmask
+    bool updateFormats(Domain domain);
+
+    /**
+     * Applies SDK configurations in a specific configuration domain.
+     * Updates relevant input/output formats and subscribes to parameters specified in the
+     * configuration.
+     * \param domain config/setParam bitmask
+     * \param blocking blocking mode to use with the component
+     */
+    status_t getConfigUpdateFromSdkParams(
+            std::shared_ptr<Codec2Client::Component> component,
+            const sp<AMessage> &sdkParams, Domain domain,
+            c2_blocking_t blocking,
+            std::vector<std::unique_ptr<C2Param>> *configUpdate) const;
+
+    /**
+     * Applies a configuration update to the component.
+     * Updates relevant input/output formats and subscribes to parameters specified in the
+     * configuration.
+     * \param blocking blocking mode to use with the component
+     */
+    status_t setParameters(
+            std::shared_ptr<Codec2Client::Component> component,
+            std::vector<std::unique_ptr<C2Param>> &configUpdate,
+            c2_blocking_t blocking);
+
+    /// Queries subscribed indices (which contains all SDK-exposed values) and updates
+    /// input/output formats.
+    status_t queryConfiguration(
+            const std::shared_ptr<Codec2Client::Component> &component);
+
+    /// Queries a configuration parameter value. Returns nullptr if the parameter is not
+    /// part of the current configuration
+    const C2Param *getConfigParameterValue(C2Param::Index index) const;
+
+    /**
+     * Object that can be used to access configuration parameters and if they change.
+     */
+    template<typename T>
+    struct Watcher {
+        ~Watcher() = default;
+
+        /// returns true if the value of this configuration has changed
+        bool hasChanged() const {
+            const C2Param *value = mParent->getConfigParameterValue(mIndex);
+            if (value && mValue) {
+                return *value != *mValue;
+            } else {
+                // changed if exactly one of current/cached value is present
+                return value != mValue.get();
+            }
+        }
+
+        /// updates the current value and returns it
+        std::shared_ptr<const T> update() {
+            const C2Param *value = mParent->getConfigParameterValue(mIndex);
+            if (value) {
+                mValue = std::shared_ptr<const T>(T::From(C2Param::Copy(*value).release()));
+            }
+            return mValue;
+        }
+
+    private:
+        // only CCodecConfig::watch() may create watchers
+        Watcher(C2Param::Index index, const CCodecConfig *parent)
+            : mParent(parent), mIndex(index) {
+            update();
+        }
+
+        friend struct CCodecConfig;
+
+        const CCodecConfig *mParent; // non-owning; must outlive this watcher
+        std::shared_ptr<const T> mValue;
+        C2Param::Index mIndex;
+    };
+
+    /**
+     * Returns a watcher object for a parameter.
+     */
+    template<typename T>
+    Watcher<T> watch(C2Param::Index index = T::PARAM_TYPE) const {
+        // the watched index must carry the watched type
+        if (index.type() != T::PARAM_TYPE) {
+            __builtin_trap();
+        }
+        return Watcher<T>(index, this);
+    }
+
+private:
+
+    /// initializes the standard MediaCodec to Codec 2.0 params mapping
+    void initializeStandardParams();
+
+    /// Adds indices to the subscribed indices, and updated subscription to component
+    /// \param blocking blocking mode to use with the component
+    status_t subscribeToConfigUpdate(
+            const std::shared_ptr<Codec2Client::Component> &component,
+            const std::vector<C2Param::Index> &indices,
+            c2_blocking_t blocking = C2_DONT_BLOCK);
+
+    /// Gets SDK format from codec 2.0 reflected configuration
+    /// \param domain input/output bitmask
+    sp<AMessage> getSdkFormatForDomain(
+            const ReflectedParamUpdater::Dict &reflected, Domain domain) const;
+
+    /**
+     * Converts a set of configuration parameters in an AMessage to a list of path-based Codec
+     * 2.0 configuration parameters.
+     *
+     * \param domain config/setParam bitmask
+     */
+    ReflectedParamUpdater::Dict getReflectedFormat(
+            const sp<AMessage> &config, Domain domain) const;
+};
+
+DEFINE_ENUM_OPERATORS(CCodecConfig::Domain)
+
+} // namespace android
+
+#endif  // C_CODEC_CONFIG_H_
+
diff --git a/media/codec2/sfplugin/Codec2Buffer.cpp b/media/codec2/sfplugin/Codec2Buffer.cpp
new file mode 100644
index 0000000..bf6062e
--- /dev/null
+++ b/media/codec2/sfplugin/Codec2Buffer.cpp
@@ -0,0 +1,810 @@
+/*
+ * Copyright 2018, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "Codec2Buffer"
+#include <utils/Log.h>
+
+#include <hidlmemory/FrameworkUtils.h>
+#include <media/hardware/HardwareAPI.h>
+#include <media/stagefright/MediaCodecConstants.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/AUtils.h>
+#include <nativebase/nativebase.h>
+
+#include <C2AllocatorGralloc.h>
+#include <C2BlockInternal.h>
+#include <C2Debug.h>
+
+#include "Codec2Buffer.h"
+
+namespace android {
+
+// Codec2Buffer
+
+bool Codec2Buffer::canCopyLinear(const std::shared_ptr<C2Buffer> &buffer) const {
+    // A linear copy is possible iff this buffer has backing memory and
+    // |buffer| is null/empty or a single linear block that fits our capacity.
+    if (const_cast<Codec2Buffer *>(this)->base() == nullptr) {
+        return false;
+    }
+    if (!buffer) {
+        // Nothing to copy, so we can copy by doing nothing.
+        return true;
+    }
+    if (buffer->data().type() != C2BufferData::LINEAR) {
+        return false;
+    }
+    if (buffer->data().linearBlocks().size() == 0u) {
+        // Nothing to copy, so we can copy by doing nothing.
+        return true;
+    } else if (buffer->data().linearBlocks().size() > 1u) {
+        // We don't know how to copy more than one block.
+        return false;
+    }
+    if (buffer->data().linearBlocks()[0].size() > capacity()) {
+        // It won't fit.
+        return false;
+    }
+    return true;
+}
+
+bool Codec2Buffer::copyLinear(const std::shared_ptr<C2Buffer> &buffer) {
+    // We assume that all canCopyLinear() checks passed.
+    if (!buffer || buffer->data().linearBlocks().size() == 0u
+            || buffer->data().linearBlocks()[0].size() == 0u) {
+        // nothing to copy; present an empty range
+        setRange(0, 0);
+        return true;
+    }
+    C2ReadView view = buffer->data().linearBlocks()[0].map().get();
+    if (view.error() != C2_OK) {
+        ALOGD("Error while mapping: %d", view.error());
+        return false;
+    }
+    // re-check size against capacity: the map may expose more than advertised
+    if (view.capacity() > capacity()) {
+        ALOGD("C2ConstLinearBlock lied --- it actually doesn't fit: view(%u) > this(%zu)",
+                view.capacity(), capacity());
+        return false;
+    }
+    memcpy(base(), view.data(), view.capacity());
+    setRange(0, view.capacity());
+    return true;
+}
+
+void Codec2Buffer::setImageData(const sp<ABuffer> &imageData) {
+    // Attaches the MediaImage2 blob to both meta and format, and derives the
+    // SDK stride/slice-height keys from the plane layout.
+    meta()->setBuffer("image-data", imageData);
+    format()->setBuffer("image-data", imageData);
+    MediaImage2 *img = (MediaImage2*)imageData->data();
+    if (img->mNumPlanes > 0 && img->mType != img->MEDIA_IMAGE_TYPE_UNKNOWN) {
+        int32_t stride = img->mPlane[0].mRowInc;
+        format()->setInt32(KEY_STRIDE, stride);
+        if (img->mNumPlanes > 1 && stride > 0) {
+            // slice height = rows between the starts of plane 0 and plane 1
+            int32_t vstride = (img->mPlane[1].mOffset - img->mPlane[0].mOffset) / stride;
+            format()->setInt32(KEY_SLICE_HEIGHT, vstride);
+        }
+    }
+}
+
+// LocalLinearBuffer
+
+bool LocalLinearBuffer::canCopy(const std::shared_ptr<C2Buffer> &buffer) const {
+    // local linear buffers use the generic linear-copy check
+    return canCopyLinear(buffer);
+}
+
+bool LocalLinearBuffer::copy(const std::shared_ptr<C2Buffer> &buffer) {
+    // local linear buffers use the generic linear memcpy
+    return copyLinear(buffer);
+}
+
+// DummyContainerBuffer
+
+// Wraps a C2Buffer without exposing its contents; the backing ABuffer is a
+// 1-byte placeholder whose range is nonempty only while a reference is held.
+DummyContainerBuffer::DummyContainerBuffer(
+        const sp<AMessage> &format, const std::shared_ptr<C2Buffer> &buffer)
+    : Codec2Buffer(format, new ABuffer(nullptr, 1)),
+      mBufferRef(buffer) {
+    setRange(0, buffer ? 1 : 0);
+}
+
+std::shared_ptr<C2Buffer> DummyContainerBuffer::asC2Buffer() {
+    // releases (transfers) the held reference to the caller
+    return std::move(mBufferRef);
+}
+
+bool DummyContainerBuffer::canCopy(const std::shared_ptr<C2Buffer> &) const {
+    // can only accept a new buffer while not already holding one
+    return !mBufferRef;
+}
+
+bool DummyContainerBuffer::copy(const std::shared_ptr<C2Buffer> &buffer) {
+    // "copying" is just taking a reference to the C2Buffer
+    mBufferRef = buffer;
+    setRange(0, mBufferRef ? 1 : 0);
+    return true;
+}
+
+// LinearBlockBuffer
+
+// static
+sp<LinearBlockBuffer> LinearBlockBuffer::Allocate(
+        const sp<AMessage> &format, const std::shared_ptr<C2LinearBlock> &block) {
+    // Returns nullptr if the block cannot be mapped for writing.
+    C2WriteView writeView(block->map().get());
+    if (writeView.error() != C2_OK) {
+        return nullptr;
+    }
+    return new LinearBlockBuffer(format, std::move(writeView), block);
+}
+
+std::shared_ptr<C2Buffer> LinearBlockBuffer::asC2Buffer() {
+    // shares only the filled range [offset, offset + size) of the block
+    return C2Buffer::CreateLinearBuffer(mBlock->share(offset(), size(), C2Fence()));
+}
+
+bool LinearBlockBuffer::canCopy(const std::shared_ptr<C2Buffer> &buffer) const {
+    // writable block buffers use the generic linear-copy check
+    return canCopyLinear(buffer);
+}
+
+bool LinearBlockBuffer::copy(const std::shared_ptr<C2Buffer> &buffer) {
+    // writable block buffers use the generic linear memcpy
+    return copyLinear(buffer);
+}
+
+// The ABuffer aliases the mapped write view; mWriteView is retained so the
+// mapping stays valid for the lifetime of this wrapper.
+LinearBlockBuffer::LinearBlockBuffer(
+        const sp<AMessage> &format,
+        C2WriteView&& writeView,
+        const std::shared_ptr<C2LinearBlock> &block)
+    : Codec2Buffer(format, new ABuffer(writeView.data(), writeView.size())),
+      mWriteView(writeView),
+      mBlock(block) {
+}
+
+// ConstLinearBlockBuffer
+
+// static
+sp<ConstLinearBlockBuffer> ConstLinearBlockBuffer::Allocate(
+        const sp<AMessage> &format, const std::shared_ptr<C2Buffer> &buffer) {
+    // Only a buffer consisting of exactly one linear block can be wrapped;
+    // returns nullptr otherwise, or if mapping for reading fails.
+    if (!buffer
+            || buffer->data().type() != C2BufferData::LINEAR
+            || buffer->data().linearBlocks().size() != 1u) {
+        return nullptr;
+    }
+    C2ReadView readView(buffer->data().linearBlocks()[0].map().get());
+    if (readView.error() != C2_OK) {
+        return nullptr;
+    }
+    return new ConstLinearBlockBuffer(format, std::move(readView), buffer);
+}
+
+// The ABuffer aliases the mapped read view; mReadView is retained so the
+// mapping stays valid for the lifetime of this wrapper.
+ConstLinearBlockBuffer::ConstLinearBlockBuffer(
+        const sp<AMessage> &format,
+        C2ReadView&& readView,
+        const std::shared_ptr<C2Buffer> &buffer)
+    : Codec2Buffer(format, new ABuffer(
+            // NOTE: ABuffer only takes non-const pointer but this data is
+            // supposed to be read-only.
+            const_cast<uint8_t *>(readView.data()), readView.capacity())),
+      mReadView(readView),
+      mBufferRef(buffer) {
+}
+
+std::shared_ptr<C2Buffer> ConstLinearBlockBuffer::asC2Buffer() {
+    // releases (transfers) the held reference to the caller
+    return std::move(mBufferRef);
+}
+
+// GraphicView2MediaImageConverter
+
+namespace {
+
+class GraphicView2MediaImageConverter {
+public:
+ /**
+ * Creates a C2GraphicView <=> MediaImage converter
+ *
+ * \param view C2GraphicView object
+ * \param colorFormat desired SDK color format for the MediaImage (if this is a flexible format,
+ * an attempt is made to simply represent the graphic view as a flexible SDK format
+ * without a memcpy)
+ */
+ GraphicView2MediaImageConverter(
+ const C2GraphicView &view, int32_t colorFormat)
+ : mInitCheck(NO_INIT),
+ mView(view),
+ mWidth(view.width()),
+ mHeight(view.height()),
+ mColorFormat(colorFormat),
+ mAllocatedDepth(0),
+ mBackBufferSize(0),
+ mMediaImage(new ABuffer(sizeof(MediaImage2))) {
+ if (view.error() != C2_OK) {
+ ALOGD("Converter: view.error() = %d", view.error());
+ mInitCheck = BAD_VALUE;
+ return;
+ }
+ MediaImage2 *mediaImage = (MediaImage2 *)mMediaImage->base();
+ const C2PlanarLayout &layout = view.layout();
+ if (layout.numPlanes == 0) {
+ ALOGD("Converter: 0 planes");
+ mInitCheck = BAD_VALUE;
+ return;
+ }
+ mAllocatedDepth = layout.planes[0].allocatedDepth;
+ uint32_t bitDepth = layout.planes[0].bitDepth;
+
+ // align width and height to support subsampling cleanly
+ uint32_t mStride = align(mWidth, 2) * divUp(layout.planes[0].allocatedDepth, 8u);
+ uint32_t mVStride = align(mHeight, 2);
+
+ switch (layout.type) {
+ case C2PlanarLayout::TYPE_YUV:
+ mediaImage->mType = MediaImage2::MEDIA_IMAGE_TYPE_YUV;
+ if (layout.numPlanes != 3) {
+ ALOGD("Converter: %d planes for YUV layout", layout.numPlanes);
+ mInitCheck = BAD_VALUE;
+ return;
+ }
+ if (layout.planes[0].channel != C2PlaneInfo::CHANNEL_Y
+ || layout.planes[1].channel != C2PlaneInfo::CHANNEL_CB
+ || layout.planes[2].channel != C2PlaneInfo::CHANNEL_CR
+ || layout.planes[0].colSampling != 1
+ || layout.planes[0].rowSampling != 1
+ || layout.planes[1].colSampling != 2
+ || layout.planes[1].rowSampling != 2
+ || layout.planes[2].colSampling != 2
+ || layout.planes[2].rowSampling != 2) {
+ ALOGD("Converter: not YUV420 for YUV layout");
+ mInitCheck = BAD_VALUE;
+ return;
+ }
+ switch (mColorFormat) {
+ case COLOR_FormatYUV420Flexible:
+ { // try to map directly. check if the planes are near one another
+ const uint8_t *minPtr = mView.data()[0];
+ const uint8_t *maxPtr = mView.data()[0];
+ int32_t planeSize = 0;
+ for (uint32_t i = 0; i < layout.numPlanes; ++i) {
+ const C2PlaneInfo &plane = layout.planes[i];
+ ssize_t minOffset = plane.minOffset(mWidth, mHeight);
+ ssize_t maxOffset = plane.maxOffset(mWidth, mHeight);
+ if (minPtr > mView.data()[i] + minOffset) {
+ minPtr = mView.data()[i] + minOffset;
+ }
+ if (maxPtr < mView.data()[i] + maxOffset) {
+ maxPtr = mView.data()[i] + maxOffset;
+ }
+ planeSize += std::abs(plane.rowInc) * align(mHeight, 64)
+ / plane.rowSampling / plane.colSampling * divUp(mAllocatedDepth, 8u);
+ }
+
+ if ((maxPtr - minPtr + 1) <= planeSize) {
+ // FIXME: this is risky as reading/writing data out of bound results in
+ // an undefined behavior, but gralloc does assume a contiguous
+ // mapping
+ for (uint32_t i = 0; i < layout.numPlanes; ++i) {
+ const C2PlaneInfo &plane = layout.planes[i];
+ mediaImage->mPlane[i].mOffset = mView.data()[i] - minPtr;
+ mediaImage->mPlane[i].mColInc = plane.colInc;
+ mediaImage->mPlane[i].mRowInc = plane.rowInc;
+ mediaImage->mPlane[i].mHorizSubsampling = plane.colSampling;
+ mediaImage->mPlane[i].mVertSubsampling = plane.rowSampling;
+ }
+ mWrapped = new ABuffer(const_cast<uint8_t *>(minPtr), maxPtr - minPtr + 1);
+ break;
+ }
+ }
+ [[fallthrough]];
+
+ case COLOR_FormatYUV420Planar:
+ case COLOR_FormatYUV420PackedPlanar:
+ mediaImage->mPlane[mediaImage->Y].mOffset = 0;
+ mediaImage->mPlane[mediaImage->Y].mColInc = 1;
+ mediaImage->mPlane[mediaImage->Y].mRowInc = mStride;
+ mediaImage->mPlane[mediaImage->Y].mHorizSubsampling = 1;
+ mediaImage->mPlane[mediaImage->Y].mVertSubsampling = 1;
+
+ mediaImage->mPlane[mediaImage->U].mOffset = mStride * mVStride;
+ mediaImage->mPlane[mediaImage->U].mColInc = 1;
+ mediaImage->mPlane[mediaImage->U].mRowInc = mStride / 2;
+ mediaImage->mPlane[mediaImage->U].mHorizSubsampling = 2;
+ mediaImage->mPlane[mediaImage->U].mVertSubsampling = 2;
+
+ mediaImage->mPlane[mediaImage->V].mOffset = mStride * mVStride * 5 / 4;
+ mediaImage->mPlane[mediaImage->V].mColInc = 1;
+ mediaImage->mPlane[mediaImage->V].mRowInc = mStride / 2;
+ mediaImage->mPlane[mediaImage->V].mHorizSubsampling = 2;
+ mediaImage->mPlane[mediaImage->V].mVertSubsampling = 2;
+ break;
+
+ case COLOR_FormatYUV420SemiPlanar:
+ case COLOR_FormatYUV420PackedSemiPlanar:
+ mediaImage->mPlane[mediaImage->Y].mOffset = 0;
+ mediaImage->mPlane[mediaImage->Y].mColInc = 1;
+ mediaImage->mPlane[mediaImage->Y].mRowInc = mStride;
+ mediaImage->mPlane[mediaImage->Y].mHorizSubsampling = 1;
+ mediaImage->mPlane[mediaImage->Y].mVertSubsampling = 1;
+
+ mediaImage->mPlane[mediaImage->U].mOffset = mStride * mVStride;
+ mediaImage->mPlane[mediaImage->U].mColInc = 2;
+ mediaImage->mPlane[mediaImage->U].mRowInc = mStride;
+ mediaImage->mPlane[mediaImage->U].mHorizSubsampling = 2;
+ mediaImage->mPlane[mediaImage->U].mVertSubsampling = 2;
+
+ mediaImage->mPlane[mediaImage->V].mOffset = mStride * mVStride + 1;
+ mediaImage->mPlane[mediaImage->V].mColInc = 2;
+ mediaImage->mPlane[mediaImage->V].mRowInc = mStride;
+ mediaImage->mPlane[mediaImage->V].mHorizSubsampling = 2;
+ mediaImage->mPlane[mediaImage->V].mVertSubsampling = 2;
+ break;
+
+ default:
+ ALOGD("Converter: incompactible color format (%d) for YUV layout", mColorFormat);
+ mInitCheck = BAD_VALUE;
+ return;
+ }
+ break;
+ case C2PlanarLayout::TYPE_YUVA:
+ mediaImage->mType = MediaImage2::MEDIA_IMAGE_TYPE_YUVA;
+ // We don't have an SDK YUVA format
+ ALOGD("Converter: incompactible color format (%d) for YUVA layout", mColorFormat);
+ mInitCheck = BAD_VALUE;
+ return;
+ case C2PlanarLayout::TYPE_RGB:
+ mediaImage->mType = MediaImage2::MEDIA_IMAGE_TYPE_RGB;
+ switch (mColorFormat) {
+ // TODO media image
+ case COLOR_FormatRGBFlexible:
+ case COLOR_Format24bitBGR888:
+ case COLOR_Format24bitRGB888:
+ break;
+ default:
+ ALOGD("Converter: incompactible color format (%d) for RGB layout", mColorFormat);
+ mInitCheck = BAD_VALUE;
+ return;
+ }
+ if (layout.numPlanes != 3) {
+ ALOGD("Converter: %d planes for RGB layout", layout.numPlanes);
+ mInitCheck = BAD_VALUE;
+ return;
+ }
+ break;
+ case C2PlanarLayout::TYPE_RGBA:
+ mediaImage->mType = MediaImage2::MEDIA_IMAGE_TYPE_RGBA;
+ switch (mColorFormat) {
+ // TODO media image
+ case COLOR_FormatRGBAFlexible:
+ case COLOR_Format32bitABGR8888:
+ case COLOR_Format32bitARGB8888:
+ case COLOR_Format32bitBGRA8888:
+ break;
+ default:
+ ALOGD("Incompactible color format (%d) for RGBA layout", mColorFormat);
+ mInitCheck = BAD_VALUE;
+ return;
+ }
+ if (layout.numPlanes != 4) {
+ ALOGD("Converter: %d planes for RGBA layout", layout.numPlanes);
+ mInitCheck = BAD_VALUE;
+ return;
+ }
+ break;
+ default:
+ mediaImage->mType = MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
+ ALOGD("Unknown layout");
+ mInitCheck = BAD_VALUE;
+ return;
+ }
+ mediaImage->mNumPlanes = layout.numPlanes;
+ mediaImage->mWidth = mWidth;
+ mediaImage->mHeight = mHeight;
+ mediaImage->mBitDepth = bitDepth;
+ mediaImage->mBitDepthAllocated = mAllocatedDepth;
+
+ uint32_t bufferSize = 0;
+ for (uint32_t i = 0; i < layout.numPlanes; ++i) {
+ const C2PlaneInfo &plane = layout.planes[i];
+ if (plane.allocatedDepth < plane.bitDepth
+ || plane.rightShift != plane.allocatedDepth - plane.bitDepth) {
+ ALOGD("rightShift value of %u unsupported", plane.rightShift);
+ mInitCheck = BAD_VALUE;
+ return;
+ }
+ if (plane.allocatedDepth > 8 && plane.endianness != C2PlaneInfo::NATIVE) {
+ ALOGD("endianness value of %u unsupported", plane.endianness);
+ mInitCheck = BAD_VALUE;
+ return;
+ }
+ if (plane.allocatedDepth != mAllocatedDepth || plane.bitDepth != bitDepth) {
+ ALOGV("different allocatedDepth/bitDepth per plane unsupported");
+ mInitCheck = BAD_VALUE;
+ return;
+ }
+ bufferSize += mStride * mVStride
+ / plane.rowSampling / plane.colSampling;
+ }
+
+ mBackBufferSize = bufferSize;
+ mInitCheck = OK;
+ }
+
+ // Result of the layout validation performed in the constructor; OK iff the
+ // view can be described by a MediaImage2 (wrapped or via a back buffer).
+ status_t initCheck() const { return mInitCheck; }
+
+ // Number of contiguous bytes needed for a back buffer when the view cannot
+ // be wrapped directly (see wrap() / setBackBuffer()).
+ uint32_t backBufferSize() const { return mBackBufferSize; }
+
+ /**
+ * Wrap C2GraphicView using a MediaImage2. Note that if not wrapped, the content is not mapped
+ * in this function --- the caller should use the copyToMediaImage() function to copy the
+ * data into a backing buffer explicitly.
+ *
+ * \return media buffer. This is null if wrapping failed, or if a back buffer
+ * has already been set via setBackBuffer().
+ */
+ sp<ABuffer> wrap() const {
+ if (mBackBuffer == nullptr) {
+ return mWrapped;
+ }
+ return nullptr;
+ }
+
+ /**
+ * Use |backBuffer| as the CPU-side destination for the view's content.
+ *
+ * \param backBuffer buffer to copy into; must hold at least backBufferSize() bytes.
+ * \return true on success (the buffer's range is set to the exact required size);
+ * false if the buffer is too small.
+ */
+ bool setBackBuffer(const sp<ABuffer> &backBuffer) {
+ if (backBuffer->capacity() < mBackBufferSize) {
+ return false;
+ }
+ backBuffer->setRange(0, mBackBufferSize);
+ mBackBuffer = backBuffer;
+ return true;
+ }
+
+ /**
+ * Copy C2GraphicView to MediaImage2.
+ *
+ * NOTE(review): mBackBuffer is dereferenced without a null check --- this
+ * assumes setBackBuffer() succeeded beforehand; confirm all callers do so.
+ *
+ * \return mInitCheck if initialization failed, otherwise the ImageCopy() result.
+ */
+ status_t copyToMediaImage() {
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+ return ImageCopy(mBackBuffer->base(), getMediaImage(), mView);
+ }
+
+ // MediaImage2 plane description wrapped in an ABuffer ("image-data" metadata).
+ const sp<ABuffer> &imageData() const { return mMediaImage; }
+
+private:
+ status_t mInitCheck;
+
+ const C2GraphicView mView;
+ uint32_t mWidth;
+ uint32_t mHeight;
+ int32_t mColorFormat; ///< SDK color format for MediaImage
+ sp<ABuffer> mWrapped; ///< wrapped buffer (if we can map C2Buffer to an ABuffer)
+ uint32_t mAllocatedDepth;
+ uint32_t mBackBufferSize;
+ sp<ABuffer> mMediaImage;
+ std::function<sp<ABuffer>(size_t)> mAlloc;
+
+ sp<ABuffer> mBackBuffer; ///< backing buffer if we have to copy C2Buffer <=> ABuffer
+
+ // Typed access to the MediaImage2 struct stored in mMediaImage.
+ MediaImage2 *getMediaImage() {
+ return (MediaImage2 *)mMediaImage->base();
+ }
+};
+
+} // namespace
+
+// GraphicBlockBuffer
+
+// static
+// Maps |block| and wraps it in a GraphicBlockBuffer. If the mapped view cannot
+// be exposed directly as a MediaImage2, a backing ABuffer is obtained via
+// |alloc|; the client writes into that buffer and the content is copied into
+// the block when asC2Buffer() is called.
+sp<GraphicBlockBuffer> GraphicBlockBuffer::Allocate(
+ const sp<AMessage> &format,
+ const std::shared_ptr<C2GraphicBlock> &block,
+ std::function<sp<ABuffer>(size_t)> alloc) {
+ C2GraphicView view(block->map().get());
+ if (view.error() != C2_OK) {
+ ALOGD("C2GraphicBlock::map failed: %d", view.error());
+ return nullptr;
+ }
+
+ // Default to flexible YUV420 when the format carries no color format.
+ int32_t colorFormat = COLOR_FormatYUV420Flexible;
+ (void)format->findInt32("color-format", &colorFormat);
+
+ GraphicView2MediaImageConverter converter(view, colorFormat);
+ if (converter.initCheck() != OK) {
+ ALOGD("Converter init failed: %d", converter.initCheck());
+ return nullptr;
+ }
+ bool wrapped = true;
+ sp<ABuffer> buffer = converter.wrap();
+ if (buffer == nullptr) {
+ // View cannot be exposed directly; fall back to a separate back buffer.
+ buffer = alloc(converter.backBufferSize());
+ if (!converter.setBackBuffer(buffer)) {
+ ALOGD("Converter failed to set back buffer");
+ return nullptr;
+ }
+ wrapped = false;
+ }
+ return new GraphicBlockBuffer(
+ format,
+ buffer,
+ std::move(view),
+ block,
+ converter.imageData(),
+ wrapped);
+}
+
+// Private; use Allocate(). |wrapped| indicates whether |buffer| aliases the
+// mapped block directly (true) or is a separate back buffer (false).
+GraphicBlockBuffer::GraphicBlockBuffer(
+ const sp<AMessage> &format,
+ const sp<ABuffer> &buffer,
+ C2GraphicView &&view,
+ const std::shared_ptr<C2GraphicBlock> &block,
+ const sp<ABuffer> &imageData,
+ bool wrapped)
+ : Codec2Buffer(format, buffer),
+ mView(view),
+ mBlock(block),
+ mImageData(imageData),
+ mWrapped(wrapped) {
+ // Publish the MediaImage2 plane layout to clients as "image-data" metadata.
+ setImageData(imageData);
+}
+
+std::shared_ptr<C2Buffer> GraphicBlockBuffer::asC2Buffer() {
+ uint32_t width = mView.width();
+ uint32_t height = mView.height();
+ if (!mWrapped) {
+ // The client wrote into the separate back buffer; copy its content into
+ // the mapped graphic view before sharing the block.
+ (void)ImageCopy(mView, base(), imageData());
+ }
+ return C2Buffer::CreateGraphicBuffer(
+ mBlock->share(C2Rect(width, height), C2Fence()));
+}
+
+// GraphicMetadataBuffer
+
+// Wraps a VideoNativeMetadata struct in a local linear buffer. pBuffer is
+// cleared so a stale pointer is never interpreted as a valid buffer.
+GraphicMetadataBuffer::GraphicMetadataBuffer(
+ const sp<AMessage> &format,
+ const std::shared_ptr<C2Allocator> &alloc)
+ : Codec2Buffer(format, new ABuffer(sizeof(VideoNativeMetadata))),
+ mAlloc(alloc) {
+ ((VideoNativeMetadata *)base())->pBuffer = nullptr;
+}
+
+// Converts the VideoNativeMetadata's ANativeWindowBuffer into a graphic
+// C2Buffer. Only functional in 32-bit processes: pBuffer holds a raw pointer
+// that cannot round-trip through the metadata struct on LP64.
+std::shared_ptr<C2Buffer> GraphicMetadataBuffer::asC2Buffer() {
+#ifndef __LP64__
+ VideoNativeMetadata *meta = (VideoNativeMetadata *)base();
+ ANativeWindowBuffer *buffer = (ANativeWindowBuffer *)meta->pBuffer;
+ if (buffer == nullptr) {
+ ALOGD("VideoNativeMetadata contains null buffer");
+ return nullptr;
+ }
+
+ ALOGV("VideoNativeMetadata: %dx%d", buffer->width, buffer->height);
+ // The handle is cloned so the C2 allocation owns its own copy.
+ C2Handle *handle = WrapNativeCodec2GrallocHandle(
+ native_handle_clone(buffer->handle),
+ buffer->width,
+ buffer->height,
+ buffer->format,
+ buffer->usage,
+ buffer->stride);
+ std::shared_ptr<C2GraphicAllocation> alloc;
+ c2_status_t err = mAlloc->priorGraphicAllocation(handle, &alloc);
+ if (err != C2_OK) {
+ // NOTE(review): the cloned |handle| appears to leak on this path ---
+ // confirm whether priorGraphicAllocation takes ownership on failure.
+ ALOGD("Failed to wrap VideoNativeMetadata into C2GraphicAllocation");
+ return nullptr;
+ }
+ std::shared_ptr<C2GraphicBlock> block = _C2BlockFactory::CreateGraphicBlock(alloc);
+
+ // Consume the metadata so the same buffer is not converted twice.
+ meta->pBuffer = 0;
+ // TODO: fence
+ return C2Buffer::CreateGraphicBuffer(
+ block->share(C2Rect(buffer->width, buffer->height), C2Fence()));
+#else
+ ALOGE("GraphicMetadataBuffer does not work on 64-bit arch");
+ return nullptr;
+#endif
+}
+
+// ConstGraphicBlockBuffer
+
+// static
+// Wraps a read-only graphic C2Buffer. If the mapped view cannot be exposed as
+// a MediaImage2 directly, the pixels are copied into an ABuffer obtained via
+// |alloc| and the mapping is released before returning.
+sp<ConstGraphicBlockBuffer> ConstGraphicBlockBuffer::Allocate(
+ const sp<AMessage> &format,
+ const std::shared_ptr<C2Buffer> &buffer,
+ std::function<sp<ABuffer>(size_t)> alloc) {
+ // Precondition: a non-null graphic buffer with exactly one block.
+ if (!buffer
+ || buffer->data().type() != C2BufferData::GRAPHIC
+ || buffer->data().graphicBlocks().size() != 1u) {
+ ALOGD("C2Buffer precond fail");
+ return nullptr;
+ }
+ std::unique_ptr<const C2GraphicView> view(std::make_unique<const C2GraphicView>(
+ buffer->data().graphicBlocks()[0].map().get()));
+ std::unique_ptr<const C2GraphicView> holder;
+
+ int32_t colorFormat = COLOR_FormatYUV420Flexible;
+ (void)format->findInt32("color-format", &colorFormat);
+
+ GraphicView2MediaImageConverter converter(*view, colorFormat);
+ if (converter.initCheck() != OK) {
+ ALOGD("Converter init failed: %d", converter.initCheck());
+ return nullptr;
+ }
+ bool wrapped = true;
+ sp<ABuffer> aBuffer = converter.wrap();
+ if (aBuffer == nullptr) {
+ aBuffer = alloc(converter.backBufferSize());
+ if (!converter.setBackBuffer(aBuffer)) {
+ ALOGD("Converter failed to set back buffer");
+ return nullptr;
+ }
+ wrapped = false;
+ // NOTE(review): the copy result is ignored here --- confirm failure is
+ // acceptable (the buffer would then expose uninitialized content).
+ converter.copyToMediaImage();
+ // We don't need the view.
+ // Moving it into |holder| keeps the mapping alive only until this scope
+ // ends, and makes the std::move(view) below pass nullptr to the ctor.
+ holder = std::move(view);
+ }
+ return new ConstGraphicBlockBuffer(
+ format,
+ aBuffer,
+ std::move(view),
+ buffer,
+ converter.imageData(),
+ wrapped);
+}
+
+// static
+// Allocates a ConstGraphicBlockBuffer with a local buffer only (no wrapped
+// C2Buffer or mapping); content is filled later via copy().
+sp<ConstGraphicBlockBuffer> ConstGraphicBlockBuffer::AllocateEmpty(
+ const sp<AMessage> &format,
+ std::function<sp<ABuffer>(size_t)> alloc) {
+ int32_t width, height;
+ if (!format->findInt32("width", &width)
+ || !format->findInt32("height", &height)) {
+ ALOGD("format had no width / height");
+ return nullptr;
+ }
+ // 4 bytes per pixel --- presumably sized for the worst case (e.g. 32-bit
+ // RGBA); TODO(review): confirm this always covers copy() targets.
+ sp<ABuffer> aBuffer(alloc(width * height * 4));
+ return new ConstGraphicBlockBuffer(
+ format,
+ aBuffer,
+ nullptr,
+ nullptr,
+ nullptr,
+ false);
+}
+
+// Private; use Allocate() / AllocateEmpty(). |imageData| may be null (empty
+// buffer case), in which case no "image-data" metadata is published yet.
+ConstGraphicBlockBuffer::ConstGraphicBlockBuffer(
+ const sp<AMessage> &format,
+ const sp<ABuffer> &aBuffer,
+ std::unique_ptr<const C2GraphicView> &&view,
+ const std::shared_ptr<C2Buffer> &buffer,
+ const sp<ABuffer> &imageData,
+ bool wrapped)
+ : Codec2Buffer(format, aBuffer),
+ mView(std::move(view)),
+ mBufferRef(buffer),
+ mWrapped(wrapped) {
+ if (imageData != nullptr) {
+ setImageData(imageData);
+ }
+}
+
+// Releases the mapping and hands the wrapped C2Buffer back to the caller.
+// After this call mBufferRef is empty, which re-enables canCopy()/copy().
+std::shared_ptr<C2Buffer> ConstGraphicBlockBuffer::asC2Buffer() {
+ mView.reset();
+ return std::move(mBufferRef);
+}
+
+bool ConstGraphicBlockBuffer::canCopy(const std::shared_ptr<C2Buffer> &buffer) const {
+ // Only an empty (non-wrapped, no buffer ref) object can receive a copy.
+ if (mWrapped || mBufferRef) {
+ ALOGD("ConstGraphicBlockBuffer::canCopy: %swrapped ; buffer ref %s",
+ mWrapped ? "" : "not ", mBufferRef ? "exists" : "doesn't exist");
+ return false;
+ }
+ if (!buffer) {
+ // Nothing to copy, so we can copy by doing nothing.
+ return true;
+ }
+ if (buffer->data().type() != C2BufferData::GRAPHIC) {
+ ALOGD("ConstGraphicBlockBuffer::canCopy: buffer precondition unsatisfied");
+ return false;
+ }
+ if (buffer->data().graphicBlocks().size() == 0) {
+ // An empty graphic buffer is also copied by doing nothing.
+ return true;
+ } else if (buffer->data().graphicBlocks().size() != 1u) {
+ ALOGD("ConstGraphicBlockBuffer::canCopy: too many blocks");
+ return false;
+ }
+
+ int32_t colorFormat = COLOR_FormatYUV420Flexible;
+ // FIXME: format() is not const, but we cannot change it, so do a const cast here
+ const_cast<ConstGraphicBlockBuffer *>(this)->format()->findInt32("color-format", &colorFormat);
+
+ // Dry-run the conversion to verify the layout is supported and fits.
+ GraphicView2MediaImageConverter converter(
+ buffer->data().graphicBlocks()[0].map().get(), colorFormat);
+ if (converter.initCheck() != OK) {
+ ALOGD("ConstGraphicBlockBuffer::canCopy: converter init failed: %d", converter.initCheck());
+ return false;
+ }
+ if (converter.backBufferSize() > capacity()) {
+ ALOGD("ConstGraphicBlockBuffer::canCopy: insufficient capacity: req %u has %zu",
+ converter.backBufferSize(), capacity());
+ return false;
+ }
+ return true;
+}
+
+// Copies |buffer|'s pixels into the local buffer (laid out as MediaImage2)
+// and keeps a reference to |buffer|. Assumes canCopy() already passed.
+bool ConstGraphicBlockBuffer::copy(const std::shared_ptr<C2Buffer> &buffer) {
+ if (!buffer || buffer->data().graphicBlocks().size() == 0) {
+ // Nothing to copy; present an empty range.
+ setRange(0, 0);
+ return true;
+ }
+ int32_t colorFormat = COLOR_FormatYUV420Flexible;
+ format()->findInt32("color-format", &colorFormat);
+
+ GraphicView2MediaImageConverter converter(
+ buffer->data().graphicBlocks()[0].map().get(), colorFormat);
+ if (converter.initCheck() != OK) {
+ ALOGD("ConstGraphicBlockBuffer::copy: converter init failed: %d", converter.initCheck());
+ return false;
+ }
+ // Copy straight into this buffer's own storage.
+ sp<ABuffer> aBuffer = new ABuffer(base(), capacity());
+ if (!converter.setBackBuffer(aBuffer)) {
+ ALOGD("ConstGraphicBlockBuffer::copy: set back buffer failed");
+ return false;
+ }
+ // NOTE(review): the copy result is ignored --- confirm failure here is
+ // acceptable (true is returned regardless).
+ converter.copyToMediaImage();
+ setImageData(converter.imageData());
+ mBufferRef = buffer;
+ return true;
+}
+
+// EncryptedLinearBlockBuffer
+
+// The client-facing storage is the IMemory region (encrypted content); the
+// C2LinearBlock is only shared with the component, and is not mapped here.
+EncryptedLinearBlockBuffer::EncryptedLinearBlockBuffer(
+ const sp<AMessage> &format,
+ const std::shared_ptr<C2LinearBlock> &block,
+ const sp<IMemory> &memory,
+ int32_t heapSeqNum)
+ : Codec2Buffer(format, new ABuffer(memory->pointer(), memory->size())),
+ mBlock(block),
+ mMemory(memory),
+ mHeapSeqNum(heapSeqNum) {
+}
+
+// Shares the current [offset, offset + size) window of the linear block.
+std::shared_ptr<C2Buffer> EncryptedLinearBlockBuffer::asC2Buffer() {
+ return C2Buffer::CreateLinearBuffer(mBlock->share(offset(), size(), C2Fence()));
+}
+
+// Describes the encrypted IMemory region to ICrypto for decryption.
+void EncryptedLinearBlockBuffer::fillSourceBuffer(
+ ICrypto::SourceBuffer *source) {
+ source->mSharedMemory = mMemory;
+ source->mHeapSeqNum = mHeapSeqNum;
+}
+
+// Describes the encrypted region to the CAS HAL. The HIDL wrapper of the
+// underlying heap is created lazily and cached in mHidlMemory.
+void EncryptedLinearBlockBuffer::fillSourceBuffer(
+ hardware::cas::native::V1_0::SharedBuffer *source) {
+ ssize_t offset;
+ size_t size;
+
+ mHidlMemory = hardware::fromHeap(mMemory->getMemory(&offset, &size));
+ source->heapBase = *mHidlMemory;
+ source->offset = offset;
+ source->size = size;
+}
+
+// Maps the linear block for writing and copies |length| decrypted bytes into
+// it. Non-secure use only --- mapping the block would fail for secure buffers.
+bool EncryptedLinearBlockBuffer::copyDecryptedContent(
+ const sp<IMemory> &decrypted, size_t length) {
+ C2WriteView view = mBlock->map().get();
+ if (view.error() != C2_OK) {
+ return false;
+ }
+ if (view.size() < length) {
+ return false;
+ }
+ memcpy(view.data(), decrypted->pointer(), length);
+ return true;
+}
+
+// Convenience overload: the decrypted content is in the internal IMemory.
+bool EncryptedLinearBlockBuffer::copyDecryptedContentFromMemory(size_t length) {
+ return copyDecryptedContent(mMemory, length);
+}
+
+// Native handle of the (secure) block for ICrypto; ownership stays with mBlock.
+native_handle_t *EncryptedLinearBlockBuffer::handle() const {
+ return const_cast<native_handle_t *>(mBlock->handle());
+}
+
+} // namespace android
diff --git a/media/codec2/sfplugin/Codec2Buffer.h b/media/codec2/sfplugin/Codec2Buffer.h
new file mode 100644
index 0000000..481975f
--- /dev/null
+++ b/media/codec2/sfplugin/Codec2Buffer.h
@@ -0,0 +1,403 @@
+/*
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CODEC2_BUFFER_H_
+
+#define CODEC2_BUFFER_H_
+
+#include <C2Buffer.h>
+
+#include <android/hardware/cas/native/1.0/types.h>
+#include <binder/IMemory.h>
+#include <media/hardware/VideoAPI.h>
+#include <media/MediaCodecBuffer.h>
+#include <media/ICrypto.h>
+
+namespace android {
+
+/**
+ * Copies a graphic view into a media image.
+ *
+ * \param imgBase base of MediaImage
+ * \param img MediaImage data
+ * \param view graphic view
+ *
+ * \return OK on success
+ */
+status_t ImageCopy(uint8_t *imgBase, const MediaImage2 *img, const C2GraphicView &view);
+
+/**
+ * Copies a media image into a graphic view.
+ *
+ * \param view graphic view
+ * \param imgBase base of MediaImage
+ * \param img MediaImage data
+ *
+ * \return OK on success
+ */
+status_t ImageCopy(C2GraphicView &view, const uint8_t *imgBase, const MediaImage2 *img);
+
+/**
+ * Base class for MediaCodecBuffer implementations that are backed by, or can
+ * produce, a Codec 2.0 C2Buffer object.
+ */
+class Codec2Buffer : public MediaCodecBuffer {
+public:
+ using MediaCodecBuffer::MediaCodecBuffer;
+ ~Codec2Buffer() override = default;
+
+ /**
+ * \return C2Buffer object represents this buffer.
+ */
+ virtual std::shared_ptr<C2Buffer> asC2Buffer() = 0;
+
+ /**
+ * Test if we can copy the content of |buffer| into this object.
+ *
+ * \param buffer C2Buffer object to copy.
+ * \return true if the content of buffer can be copied over to this buffer
+ * false otherwise.
+ */
+ virtual bool canCopy(const std::shared_ptr<C2Buffer> &buffer) const {
+ (void)buffer;
+ return false;
+ }
+
+ /**
+ * Copy the content of |buffer| into this object. This method assumes that
+ * canCopy() check already passed.
+ *
+ * \param buffer C2Buffer object to copy.
+ * \return true if successful
+ * false otherwise.
+ */
+ virtual bool copy(const std::shared_ptr<C2Buffer> &buffer) {
+ (void)buffer;
+ return false;
+ }
+
+protected:
+ /**
+ * canCopy() implementation for linear buffers.
+ */
+ bool canCopyLinear(const std::shared_ptr<C2Buffer> &buffer) const;
+
+ /**
+ * copy() implementation for linear buffers.
+ */
+ bool copyLinear(const std::shared_ptr<C2Buffer> &buffer);
+
+ /**
+ * sets MediaImage data for flexible graphic buffers
+ */
+ void setImageData(const sp<ABuffer> &imageData);
+};
+
+/**
+ * MediaCodecBuffer implementation on top of local linear buffer. This cannot
+ * cross process boundary so asC2Buffer() returns only nullptr.
+ */
+class LocalLinearBuffer : public Codec2Buffer {
+public:
+ using Codec2Buffer::Codec2Buffer;
+
+ std::shared_ptr<C2Buffer> asC2Buffer() override { return nullptr; } // not shareable
+ bool canCopy(const std::shared_ptr<C2Buffer> &buffer) const override;
+ bool copy(const std::shared_ptr<C2Buffer> &buffer) override;
+};
+
+/**
+ * MediaCodecBuffer implementation to be used only as a dummy wrapper around a
+ * C2Buffer object.
+ */
+class DummyContainerBuffer : public Codec2Buffer {
+public:
+ DummyContainerBuffer(
+ const sp<AMessage> &format,
+ const std::shared_ptr<C2Buffer> &buffer = nullptr);
+
+ std::shared_ptr<C2Buffer> asC2Buffer() override;
+ bool canCopy(const std::shared_ptr<C2Buffer> &buffer) const override;
+ bool copy(const std::shared_ptr<C2Buffer> &buffer) override;
+
+private:
+ std::shared_ptr<C2Buffer> mBufferRef; ///< the wrapped C2Buffer object
+};
+
+/**
+ * MediaCodecBuffer implementation wraps around C2LinearBlock.
+ */
+class LinearBlockBuffer : public Codec2Buffer {
+public:
+ /**
+ * Allocate a new LinearBufferBlock wrapping around C2LinearBlock object.
+ *
+ * \param format mandatory buffer format for MediaCodecBuffer
+ * \param block C2LinearBlock object to wrap around.
+ * \return LinearBlockBuffer object with writable mapping.
+ * nullptr if unsuccessful.
+ */
+ static sp<LinearBlockBuffer> Allocate(
+ const sp<AMessage> &format, const std::shared_ptr<C2LinearBlock> &block);
+
+ virtual ~LinearBlockBuffer() = default;
+
+ std::shared_ptr<C2Buffer> asC2Buffer() override;
+ bool canCopy(const std::shared_ptr<C2Buffer> &buffer) const override;
+ bool copy(const std::shared_ptr<C2Buffer> &buffer) override;
+
+private:
+ LinearBlockBuffer(
+ const sp<AMessage> &format,
+ C2WriteView &&writeView,
+ const std::shared_ptr<C2LinearBlock> &block);
+ LinearBlockBuffer() = delete;
+
+ C2WriteView mWriteView; ///< writable mapping of mBlock
+ std::shared_ptr<C2LinearBlock> mBlock; ///< underlying linear block
+};
+
+/**
+ * MediaCodecBuffer implementation wraps around C2ConstLinearBlock.
+ */
+class ConstLinearBlockBuffer : public Codec2Buffer {
+public:
+ /**
+ * Allocate a new ConstLinearBlockBuffer wrapping around C2Buffer object.
+ *
+ * \param format mandatory buffer format for MediaCodecBuffer
+ * \param buffer linear C2Buffer object to wrap around.
+ * \return ConstLinearBlockBuffer object with readable mapping.
+ * nullptr if unsuccessful.
+ */
+ static sp<ConstLinearBlockBuffer> Allocate(
+ const sp<AMessage> &format, const std::shared_ptr<C2Buffer> &buffer);
+
+ virtual ~ConstLinearBlockBuffer() = default;
+
+ std::shared_ptr<C2Buffer> asC2Buffer() override;
+
+private:
+ ConstLinearBlockBuffer(
+ const sp<AMessage> &format,
+ C2ReadView &&readView,
+ const std::shared_ptr<C2Buffer> &buffer);
+ ConstLinearBlockBuffer() = delete;
+
+ C2ReadView mReadView; ///< read-only mapping of the wrapped buffer
+ std::shared_ptr<C2Buffer> mBufferRef; ///< the wrapped C2Buffer object
+};
+
+/**
+ * MediaCodecBuffer implementation wraps around C2GraphicBlock.
+ *
+ * This object exposes the underlying bits via accessor APIs and "image-data"
+ * metadata, created automatically at allocation time.
+ */
+class GraphicBlockBuffer : public Codec2Buffer {
+public:
+ /**
+ * Allocate a new GraphicBlockBuffer wrapping around C2GraphicBlock object.
+ * If |block| is not in good color formats, it allocates YV12 local buffer
+ * and copies the content over at asC2Buffer().
+ *
+ * \param format mandatory buffer format for MediaCodecBuffer
+ * \param block C2GraphicBlock object to wrap around.
+ * \param alloc a function to allocate backing ABuffer if needed.
+ * \return GraphicBlockBuffer object with writable mapping.
+ * nullptr if unsuccessful.
+ */
+ static sp<GraphicBlockBuffer> Allocate(
+ const sp<AMessage> &format,
+ const std::shared_ptr<C2GraphicBlock> &block,
+ std::function<sp<ABuffer>(size_t)> alloc);
+
+ std::shared_ptr<C2Buffer> asC2Buffer() override;
+
+ virtual ~GraphicBlockBuffer() = default;
+
+private:
+ GraphicBlockBuffer(
+ const sp<AMessage> &format,
+ const sp<ABuffer> &buffer,
+ C2GraphicView &&view,
+ const std::shared_ptr<C2GraphicBlock> &block,
+ const sp<ABuffer> &imageData,
+ bool wrapped);
+ GraphicBlockBuffer() = delete;
+
+ inline MediaImage2 *imageData() { return (MediaImage2 *)mImageData->data(); }
+
+ C2GraphicView mView; ///< mapped view of mBlock
+ std::shared_ptr<C2GraphicBlock> mBlock; ///< underlying graphic block
+ sp<ABuffer> mImageData; ///< MediaImage2 plane layout ("image-data")
+ const bool mWrapped; ///< true if the buffer aliases the mapped block directly
+};
+
+/**
+ * MediaCodecBuffer implementation wraps around VideoNativeMetadata.
+ *
+ * NOTE: asC2Buffer() is only functional in 32-bit processes, as
+ * VideoNativeMetadata carries a raw pointer (see implementation).
+ */
+class GraphicMetadataBuffer : public Codec2Buffer {
+public:
+ /**
+ * Construct a new GraphicMetadataBuffer with local linear buffer for
+ * VideoNativeMetadata.
+ *
+ * \param format mandatory buffer format for MediaCodecBuffer
+ */
+ GraphicMetadataBuffer(
+ const sp<AMessage> &format, const std::shared_ptr<C2Allocator> &alloc);
+
+ std::shared_ptr<C2Buffer> asC2Buffer() override;
+
+ virtual ~GraphicMetadataBuffer() = default;
+
+private:
+ GraphicMetadataBuffer() = delete;
+
+ std::shared_ptr<C2Allocator> mAlloc; ///< allocator used to wrap the native handle
+};
+
+/**
+ * MediaCodecBuffer implementation wraps around graphic C2Buffer object.
+ *
+ * This object exposes the underlying bits via accessor APIs and "image-data"
+ * metadata, created automatically at allocation time.
+ */
+class ConstGraphicBlockBuffer : public Codec2Buffer {
+public:
+ /**
+ * Allocate a new ConstGraphicBlockBuffer wrapping around C2Buffer object.
+ * If |buffer| is not in good color formats, it allocates YV12 local buffer
+ * and copies the content of |buffer| over to expose.
+ *
+ * \param format mandatory buffer format for MediaCodecBuffer
+ * \param buffer graphic C2Buffer object to wrap around.
+ * \param alloc a function to allocate backing ABuffer if needed.
+ * \return ConstGraphicBlockBuffer object with readable mapping.
+ * nullptr if unsuccessful.
+ */
+ static sp<ConstGraphicBlockBuffer> Allocate(
+ const sp<AMessage> &format,
+ const std::shared_ptr<C2Buffer> &buffer,
+ std::function<sp<ABuffer>(size_t)> alloc);
+
+ /**
+ * Allocate a new ConstGraphicBlockBuffer which allocates YV12 local buffer
+ * and copies the content of |buffer| over to expose.
+ *
+ * \param format mandatory buffer format for MediaCodecBuffer
+ * \param alloc a function to allocate backing ABuffer if needed.
+ * \return ConstGraphicBlockBuffer object with no wrapping buffer.
+ */
+ static sp<ConstGraphicBlockBuffer> AllocateEmpty(
+ const sp<AMessage> &format,
+ std::function<sp<ABuffer>(size_t)> alloc);
+
+ std::shared_ptr<C2Buffer> asC2Buffer() override;
+ bool canCopy(const std::shared_ptr<C2Buffer> &buffer) const override;
+ bool copy(const std::shared_ptr<C2Buffer> &buffer) override;
+
+ virtual ~ConstGraphicBlockBuffer() = default;
+
+private:
+ ConstGraphicBlockBuffer(
+ const sp<AMessage> &format,
+ const sp<ABuffer> &aBuffer,
+ std::unique_ptr<const C2GraphicView> &&view,
+ const std::shared_ptr<C2Buffer> &buffer,
+ const sp<ABuffer> &imageData,
+ bool wrapped);
+ ConstGraphicBlockBuffer() = delete;
+
+ sp<ABuffer> mImageData; ///< MediaImage2 plane layout ("image-data")
+ std::unique_ptr<const C2GraphicView> mView; ///< mapped view; reset by asC2Buffer()
+ std::shared_ptr<C2Buffer> mBufferRef; ///< wrapped C2Buffer; moved out by asC2Buffer()
+ const bool mWrapped; ///< true if the buffer aliases the mapped block directly
+};
+
+/**
+ * MediaCodecBuffer implementation wraps around C2LinearBlock for component
+ * and IMemory for client. Underlying C2LinearBlock won't be mapped for secure
+ * usecases.
+ */
+class EncryptedLinearBlockBuffer : public Codec2Buffer {
+public:
+ /**
+ * Construct a new EncryptedLinearBufferBlock wrapping around C2LinearBlock
+ * object and writable IMemory region.
+ *
+ * \param format mandatory buffer format for MediaCodecBuffer
+ * \param block C2LinearBlock object to wrap around.
+ * \param memory IMemory object to store encrypted content.
+ * \param heapSeqNum Heap sequence number from ICrypto; -1 if N/A
+ */
+ EncryptedLinearBlockBuffer(
+ const sp<AMessage> &format,
+ const std::shared_ptr<C2LinearBlock> &block,
+ const sp<IMemory> &memory,
+ int32_t heapSeqNum = -1);
+ EncryptedLinearBlockBuffer() = delete;
+
+ virtual ~EncryptedLinearBlockBuffer() = default;
+
+ std::shared_ptr<C2Buffer> asC2Buffer() override;
+
+ /**
+ * Fill the source buffer structure with appropriate value based on
+ * internal IMemory object.
+ *
+ * \param source source buffer structure to fill.
+ */
+ void fillSourceBuffer(ICrypto::SourceBuffer *source);
+ void fillSourceBuffer(
+ hardware::cas::native::V1_0::SharedBuffer *source);
+
+ /**
+ * Copy the content of |decrypted| into C2LinearBlock inside. This shall
+ * only be called in non-secure usecases.
+ *
+ * \param decrypted decrypted content to copy from.
+ * \param length length of the content
+ * \return true if successful
+ * false otherwise.
+ */
+ bool copyDecryptedContent(const sp<IMemory> &decrypted, size_t length);
+
+ /**
+ * Copy the content of internal IMemory object into C2LinearBlock inside.
+ * This shall only be called in non-secure usecases.
+ *
+ * \param length length of the content
+ * \return true if successful
+ * false otherwise.
+ */
+ bool copyDecryptedContentFromMemory(size_t length);
+
+ /**
+ * Return native handle of secure buffer understood by ICrypto.
+ *
+ * \return secure buffer handle
+ */
+ native_handle_t *handle() const;
+
+private:
+
+ std::shared_ptr<C2LinearBlock> mBlock; ///< linear block shared with the component
+ sp<IMemory> mMemory; ///< shared memory holding the encrypted content
+ sp<hardware::HidlMemory> mHidlMemory; ///< lazily created HIDL wrapper of mMemory's heap
+ int32_t mHeapSeqNum; ///< ICrypto heap sequence number; -1 if N/A
+};
+
+} // namespace android
+
+#endif // CODEC2_BUFFER_H_
diff --git a/media/codec2/sfplugin/Codec2InfoBuilder.cpp b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
new file mode 100644
index 0000000..f36027e
--- /dev/null
+++ b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
@@ -0,0 +1,590 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "Codec2InfoBuilder"
+#include <log/log.h>
+
+#include <strings.h>
+
+#include <C2Component.h>
+#include <C2Config.h>
+#include <C2Debug.h>
+#include <C2PlatformSupport.h>
+#include <Codec2Mapper.h>
+
+#include <OMX_Audio.h>
+#include <OMX_AudioExt.h>
+#include <OMX_IndexExt.h>
+#include <OMX_Types.h>
+#include <OMX_Video.h>
+#include <OMX_VideoExt.h>
+#include <OMX_AsString.h>
+
+#include <android/hardware/media/omx/1.0/IOmx.h>
+#include <android/hardware/media/omx/1.0/IOmxObserver.h>
+#include <android/hardware/media/omx/1.0/IOmxNode.h>
+#include <android/hardware/media/omx/1.0/types.h>
+
+#include <android-base/properties.h>
+#include <codec2/hidl/client.h>
+#include <cutils/native_handle.h>
+#include <media/omx/1.0/WOmxNode.h>
+#include <media/stagefright/MediaCodecConstants.h>
+#include <media/stagefright/foundation/MediaDefs.h>
+#include <media/stagefright/omx/OMXUtils.h>
+#include <media/stagefright/xmlparser/MediaCodecsXmlParser.h>
+
+#include "Codec2InfoBuilder.h"
+
+namespace android {
+
+using Traits = C2Component::Traits;
+
+namespace /* unnamed */ {
+
+bool hasPrefix(const std::string& s, const char* prefix) {
+ // True iff |s| starts with |prefix| (an empty prefix always matches).
+ return s.compare(0, strlen(prefix), prefix) == 0;
+}
+
+bool hasSuffix(const std::string& s, const char* suffix) {
+ // True iff |s| ends with |suffix| (an empty suffix always matches).
+ const size_t n = strlen(suffix);
+ return n <= s.size() && s.compare(s.size() - n, n, suffix) == 0;
+}
+
+// Constants from ACodec
+// OMX components use fixed port indices: 0 for input, 1 for output.
+constexpr OMX_U32 kPortIndexInput = 0;
+constexpr OMX_U32 kPortIndexOutput = 1;
+// Upper bound on iterative profile/format queries below, so a misbehaving
+// component cannot keep us looping.
+constexpr OMX_U32 kMaxIndicesToCheck = 32;
+
+/**
+ * Queries an OMX component for its capabilities (profiles/levels, color
+ * formats and feature flags) and records them into |caps|.
+ *
+ * \param name      OMX component name.
+ * \param mime      mime type to query the component for.
+ * \param isEncoder whether the component is an encoder.
+ * \param caps      capabilities writer to fill out.
+ * \return OK on success; BAD_VALUE for an unknown mime/role; NO_INIT if the
+ *         IOmx service or node could not be obtained; otherwise the error
+ *         from SetComponentRole.
+ */
+status_t queryOmxCapabilities(
+ const char* name, const char* mime, bool isEncoder,
+ MediaCodecInfo::CapabilitiesWriter* caps) {
+
+ const char *role = GetComponentRole(isEncoder, mime);
+ if (role == nullptr) {
+ return BAD_VALUE;
+ }
+
+ using namespace ::android::hardware::media::omx::V1_0;
+ using ::android::hardware::Return;
+ using ::android::hardware::Void;
+ using ::android::hardware::hidl_vec;
+ using ::android::hardware::media::omx::V1_0::utils::LWOmxNode;
+
+ sp<IOmx> omx = IOmx::getService();
+ if (!omx) {
+ ALOGW("Could not obtain IOmx service.");
+ return NO_INIT;
+ }
+
+ // Minimal observer: component messages are not needed for a capability query.
+ struct Observer : IOmxObserver {
+ virtual Return<void> onMessages(const hidl_vec<Message>&) override {
+ return Void();
+ }
+ };
+
+ sp<Observer> observer = new Observer();
+ Status status;
+ sp<IOmxNode> tOmxNode;
+ Return<void> transStatus = omx->allocateNode(
+ name, observer,
+ [&status, &tOmxNode](Status s, const sp<IOmxNode>& n) {
+ status = s;
+ tOmxNode = n;
+ });
+ if (!transStatus.isOk()) {
+ ALOGW("IOmx::allocateNode -- transaction failed.");
+ return NO_INIT;
+ }
+ if (status != Status::OK) {
+ ALOGW("IOmx::allocateNode -- error returned: %d.",
+ static_cast<int>(status));
+ return NO_INIT;
+ }
+
+ sp<LWOmxNode> omxNode = new LWOmxNode(tOmxNode);
+
+ status_t err = SetComponentRole(omxNode, role);
+ if (err != OK) {
+ omxNode->freeNode();
+ ALOGW("Failed to SetComponentRole: component = %s, role = %s.",
+ name, role);
+ return err;
+ }
+
+ // BUG FIX: hasPrefix() returns a bool, so the previous "== 0" comparison
+ // inverted the check and mis-classified video/image mime types.
+ bool isVideo = hasPrefix(mime, "video/");
+ bool isImage = hasPrefix(mime, "image/");
+
+ if (isVideo || isImage) {
+ OMX_VIDEO_PARAM_PROFILELEVELTYPE param;
+ InitOMXParams(&param);
+ param.nPortIndex = isEncoder ? kPortIndexOutput : kPortIndexInput;
+
+ for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
+ param.nProfileIndex = index;
+ status_t err = omxNode->getParameter(
+ OMX_IndexParamVideoProfileLevelQuerySupported,
+ &param, sizeof(param));
+ if (err != OK) {
+ break;
+ }
+ caps->addProfileLevel(param.eProfile, param.eLevel);
+
+ // AVC components may not list the constrained profiles explicitly, but
+ // decoders that support a profile also support its constrained version.
+ // Encoders must explicitly support constrained profiles.
+ if (!isEncoder && strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC) == 0) {
+ if (param.eProfile == OMX_VIDEO_AVCProfileHigh) {
+ caps->addProfileLevel(OMX_VIDEO_AVCProfileConstrainedHigh, param.eLevel);
+ } else if (param.eProfile == OMX_VIDEO_AVCProfileBaseline) {
+ caps->addProfileLevel(OMX_VIDEO_AVCProfileConstrainedBaseline, param.eLevel);
+ }
+ }
+
+ if (index == kMaxIndicesToCheck) {
+ ALOGW("[%s] stopping checking profiles after %u: %x/%x",
+ name, index,
+ param.eProfile, param.eLevel);
+ }
+ }
+
+ // Color format query
+ // return colors in the order reported by the OMX component
+ // prefix "flexible" standard ones with the flexible equivalent
+ OMX_VIDEO_PARAM_PORTFORMATTYPE portFormat;
+ InitOMXParams(&portFormat);
+ portFormat.nPortIndex = isEncoder ? kPortIndexInput : kPortIndexOutput;
+ for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
+ portFormat.nIndex = index;
+ status_t err = omxNode->getParameter(
+ OMX_IndexParamVideoPortFormat,
+ &portFormat, sizeof(portFormat));
+ if (err != OK) {
+ break;
+ }
+
+ OMX_U32 flexibleEquivalent;
+ if (IsFlexibleColorFormat(
+ omxNode, portFormat.eColorFormat, false /* usingNativeWindow */,
+ &flexibleEquivalent)) {
+ caps->addColorFormat(flexibleEquivalent);
+ }
+ caps->addColorFormat(portFormat.eColorFormat);
+
+ if (index == kMaxIndicesToCheck) {
+ ALOGW("[%s] stopping checking formats after %u: %s(%x)",
+ name, index,
+ asString(portFormat.eColorFormat), portFormat.eColorFormat);
+ }
+ }
+ } else if (strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC) == 0) {
+ // More audio codecs if they have profiles.
+ OMX_AUDIO_PARAM_ANDROID_PROFILETYPE param;
+ InitOMXParams(&param);
+ param.nPortIndex = isEncoder ? kPortIndexOutput : kPortIndexInput;
+ for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
+ param.nProfileIndex = index;
+ status_t err = omxNode->getParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamAudioProfileQuerySupported,
+ &param, sizeof(param));
+ if (err != OK) {
+ break;
+ }
+ // For audio, level is ignored.
+ caps->addProfileLevel(param.eProfile, 0 /* level */);
+
+ if (index == kMaxIndicesToCheck) {
+ ALOGW("[%s] stopping checking profiles after %u: %x",
+ name, index,
+ param.eProfile);
+ }
+ }
+
+ // NOTE: Without Android extensions, OMX does not provide a way to query
+ // AAC profile support
+ if (param.nProfileIndex == 0) {
+ ALOGW("component %s doesn't support profile query.", name);
+ }
+ }
+
+ if (isVideo && !isEncoder) {
+ native_handle_t *sidebandHandle = nullptr;
+ if (omxNode->configureVideoTunnelMode(
+ kPortIndexOutput, OMX_TRUE, 0, &sidebandHandle) == OK) {
+ // tunneled playback includes adaptive playback
+ caps->addFlags(MediaCodecInfo::Capabilities::kFlagSupportsAdaptivePlayback
+ | MediaCodecInfo::Capabilities::kFlagSupportsTunneledPlayback);
+ } else if (omxNode->setPortMode(
+ kPortIndexOutput, IOMX::kPortModeDynamicANWBuffer) == OK ||
+ omxNode->prepareForAdaptivePlayback(
+ kPortIndexOutput, OMX_TRUE,
+ 1280 /* width */, 720 /* height */) == OK) {
+ caps->addFlags(MediaCodecInfo::Capabilities::kFlagSupportsAdaptivePlayback);
+ }
+ }
+
+ if (isVideo && isEncoder) {
+ OMX_VIDEO_CONFIG_ANDROID_INTRAREFRESHTYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = kPortIndexOutput;
+ // TODO: should we verify if fallback is supported?
+ if (omxNode->getConfig(
+ (OMX_INDEXTYPE)OMX_IndexConfigAndroidIntraRefresh,
+ &params, sizeof(params)) == OK) {
+ caps->addFlags(MediaCodecInfo::Capabilities::kFlagSupportsIntraRefresh);
+ }
+ }
+
+ omxNode->freeNode();
+ return OK;
+}
+
+/**
+ * Adds codec entries to |writer| for every "OMX."-prefixed component found
+ * by the XML |parser|, copying the XML attributes and augmenting them with
+ * capabilities queried live from the component via IOmx.
+ */
+void buildOmxInfo(const MediaCodecsXmlParser& parser,
+ MediaCodecListWriter* writer) {
+ // OMX components share a single rank, overridable for debugging.
+ uint32_t omxRank = ::android::base::GetUintProperty(
+ "debug.stagefright.omx_default_rank", uint32_t(0x100));
+ for (const MediaCodecsXmlParser::Codec& codec : parser.getCodecMap()) {
+ const std::string &name = codec.first;
+ if (!hasPrefix(codec.first, "OMX.")) {
+ continue;
+ }
+ const MediaCodecsXmlParser::CodecProperties &properties = codec.second;
+ bool encoder = properties.isEncoder;
+ std::unique_ptr<MediaCodecInfoWriter> info =
+ writer->addMediaCodecInfo();
+ info->setName(name.c_str());
+ info->setOwner("default");
+ info->setEncoder(encoder);
+ info->setRank(omxRank);
+ for (const MediaCodecsXmlParser::Type& type : properties.typeMap) {
+ const std::string &mime = type.first;
+ std::unique_ptr<MediaCodecInfo::CapabilitiesWriter> caps =
+ info->addMime(mime.c_str());
+ const MediaCodecsXmlParser::AttributeMap &attrMap = type.second;
+ for (const MediaCodecsXmlParser::Attribute& attr : attrMap) {
+ const std::string &key = attr.first;
+ const std::string &value = attr.second;
+ // "feature-*" attributes (except bitrate modes) are boolean-ish:
+ // treat a leading "1" as enabled, anything else as disabled.
+ if (hasPrefix(key, "feature-") &&
+ !hasPrefix(key, "feature-bitrate-modes")) {
+ caps->addDetail(key.c_str(), hasPrefix(value, "1") ? 1 : 0);
+ } else {
+ caps->addDetail(key.c_str(), value.c_str());
+ }
+ }
+ // Best effort: a failed live query still leaves the XML-derived info.
+ status_t err = queryOmxCapabilities(
+ name.c_str(),
+ mime.c_str(),
+ encoder,
+ caps.get());
+ if (err != OK) {
+ ALOGE("Failed to query capabilities for %s (mime: %s). Error: %d",
+ name.c_str(),
+ mime.c_str(),
+ static_cast<int>(err));
+ }
+ }
+ }
+}
+
+} // unnamed namespace
+
+/**
+ * Builds the media codec list from Codec2 components (and, depending on the
+ * debug.stagefright.ccodec property, OMX components as well).
+ *
+ * \param writer codec list writer to populate.
+ * \return OK in all paths; individual component failures are logged and the
+ *         component is skipped, so the resulting list is best-effort.
+ */
+status_t Codec2InfoBuilder::buildMediaCodecList(MediaCodecListWriter* writer) {
+ // TODO: Remove run-time configurations once all codecs are working
+ // properly. (Assume "full" behavior eventually.)
+ //
+ // debug.stagefright.ccodec supports 5 values.
+ // 0 - Only OMX components are available.
+ // 1 - Audio decoders and encoders with prefix "c2.android." are available
+ // and ranked first.
+ // All other components with prefix "c2.android." are available with
+ // their normal ranks.
+ // Components with prefix "c2.vda." are available with their normal
+ // ranks.
+ // All other components with suffix ".avc.decoder" or ".avc.encoder"
+ // are available but ranked last.
+ // 2 - Components with prefix "c2.android." are available and ranked
+ // first.
+ // Components with prefix "c2.vda." are available with their normal
+ // ranks.
+ // All other components with suffix ".avc.decoder" or ".avc.encoder"
+ // are available but ranked last.
+ // 3 - Components with prefix "c2.android." are available and ranked
+ // first.
+ // All other components are available with their normal ranks.
+ // 4 - All components are available with their normal ranks.
+ //
+ // The default value (boot time) is 1.
+ //
+ // Note: Currently, OMX components have default rank 0x100, while all
+ // Codec2.0 software components have default rank 0x200.
+ int option = ::android::base::GetIntProperty("debug.stagefright.ccodec", 1);
+
+ // Obtain Codec2Client
+ std::vector<Traits> traits = Codec2Client::ListComponents();
+
+ // Option 0 uses the legacy XML files; all other options use the c2 ones.
+ MediaCodecsXmlParser parser(
+ MediaCodecsXmlParser::defaultSearchDirs,
+ option == 0 ? "media_codecs.xml" :
+ "media_codecs_c2.xml",
+ option == 0 ? "media_codecs_performance.xml" :
+ "media_codecs_performance_c2.xml");
+ if (parser.getParsingStatus() != OK) {
+ ALOGD("XML parser no good");
+ return OK;
+ }
+
+ // Also publish OMX components when a Codec2 input surface cannot be
+ // created (or when OMX-only mode is forced).
+ bool surfaceTest(Codec2Client::CreateInputSurface());
+ if (option == 0 || !surfaceTest) {
+ buildOmxInfo(parser, writer);
+ }
+
+ for (const Traits& trait : traits) {
+ C2Component::rank_t rank = trait.rank;
+
+ std::shared_ptr<Codec2Client::Interface> intf =
+ Codec2Client::CreateInterfaceByName(trait.name.c_str());
+ if (!intf || parser.getCodecMap().count(intf->getName()) == 0) {
+ ALOGD("%s not found in xml", trait.name.c_str());
+ continue;
+ }
+ std::string canonName = intf->getName();
+
+ // TODO: Remove this block once all codecs are enabled by default.
+ // Rank adjustment per the option values documented above; "continue"
+ // drops the component from the list entirely.
+ switch (option) {
+ case 0:
+ continue;
+ case 1:
+ if (hasPrefix(canonName, "c2.vda.")) {
+ break;
+ }
+ if (hasPrefix(canonName, "c2.android.")) {
+ if (trait.domain == C2Component::DOMAIN_AUDIO) {
+ rank = 1;
+ break;
+ }
+ break;
+ }
+ if (hasSuffix(canonName, ".avc.decoder") ||
+ hasSuffix(canonName, ".avc.encoder")) {
+ rank = std::numeric_limits<decltype(rank)>::max();
+ break;
+ }
+ continue;
+ case 2:
+ if (hasPrefix(canonName, "c2.vda.")) {
+ break;
+ }
+ if (hasPrefix(canonName, "c2.android.")) {
+ rank = 1;
+ break;
+ }
+ if (hasSuffix(canonName, ".avc.decoder") ||
+ hasSuffix(canonName, ".avc.encoder")) {
+ rank = std::numeric_limits<decltype(rank)>::max();
+ break;
+ }
+ continue;
+ case 3:
+ if (hasPrefix(canonName, "c2.android.")) {
+ rank = 1;
+ }
+ break;
+ }
+
+ std::unique_ptr<MediaCodecInfoWriter> codecInfo = writer->addMediaCodecInfo();
+ codecInfo->setName(trait.name.c_str());
+ codecInfo->setOwner("codec2");
+ bool encoder = trait.kind == C2Component::KIND_ENCODER;
+ codecInfo->setEncoder(encoder);
+ codecInfo->setRank(rank);
+ const MediaCodecsXmlParser::CodecProperties &codec =
+ parser.getCodecMap().at(canonName);
+
+ for (auto typeIt = codec.typeMap.begin(); typeIt != codec.typeMap.end(); ++typeIt) {
+ const std::string &mediaType = typeIt->first;
+ const MediaCodecsXmlParser::AttributeMap &attrMap = typeIt->second;
+ std::unique_ptr<MediaCodecInfo::CapabilitiesWriter> caps =
+ codecInfo->addMime(mediaType.c_str());
+ for (auto attrIt = attrMap.begin(); attrIt != attrMap.end(); ++attrIt) {
+ std::string key, value;
+ std::tie(key, value) = *attrIt;
+ // NOTE(review): std::stoi throws for non-numeric values, unlike
+ // the tolerant parsing in buildOmxInfo — confirm that feature
+ // values in the c2 XML files are always numeric.
+ if (key.find("feature-") == 0 && key.find("feature-bitrate-modes") != 0) {
+ caps->addDetail(key.c_str(), std::stoi(value));
+ } else {
+ caps->addDetail(key.c_str(), value.c_str());
+ }
+ }
+
+ bool gotProfileLevels = false;
+ if (intf) {
+ std::shared_ptr<C2Mapper::ProfileLevelMapper> mapper =
+ C2Mapper::GetProfileLevelMapper(trait.mediaType);
+ // if we don't know the media type, pass through all values unmapped
+
+ // TODO: we cannot find levels that are local 'maxima' without knowing the coding
+ // e.g. H.263 level 45 and level 30 could be two values for highest level as
+ // they don't include one another. For now we use the last supported value.
+ C2StreamProfileLevelInfo pl(encoder /* output */, 0u);
+ std::vector<C2FieldSupportedValuesQuery> profileQuery = {
+ C2FieldSupportedValuesQuery::Possible(C2ParamField(&pl, &pl.profile))
+ };
+
+ c2_status_t err = intf->querySupportedValues(profileQuery, C2_DONT_BLOCK);
+ ALOGV("query supported profiles -> %s | %s",
+ asString(err), asString(profileQuery[0].status));
+ if (err == C2_OK && profileQuery[0].status == C2_OK) {
+ if (profileQuery[0].values.type == C2FieldSupportedValues::VALUES) {
+ // For each supported profile, set it and query the levels
+ // supported under that profile.
+ for (C2Value::Primitive profile : profileQuery[0].values.values) {
+ pl.profile = (C2Config::profile_t)profile.ref<uint32_t>();
+ std::vector<std::unique_ptr<C2SettingResult>> failures;
+ err = intf->config({&pl}, C2_DONT_BLOCK, &failures);
+ ALOGV("set profile to %u -> %s", pl.profile, asString(err));
+ std::vector<C2FieldSupportedValuesQuery> levelQuery = {
+ C2FieldSupportedValuesQuery::Current(C2ParamField(&pl, &pl.level))
+ };
+ err = intf->querySupportedValues(levelQuery, C2_DONT_BLOCK);
+ ALOGV("query supported levels -> %s | %s",
+ asString(err), asString(levelQuery[0].status));
+ if (err == C2_OK && levelQuery[0].status == C2_OK) {
+ if (levelQuery[0].values.type == C2FieldSupportedValues::VALUES
+ && levelQuery[0].values.values.size() > 0) {
+ C2Value::Primitive level = levelQuery[0].values.values.back();
+ pl.level = (C2Config::level_t)level.ref<uint32_t>();
+ ALOGV("supporting level: %u", pl.level);
+ int32_t sdkProfile, sdkLevel;
+ if (mapper && mapper->mapProfile(pl.profile, &sdkProfile)
+ && mapper->mapLevel(pl.level, &sdkLevel)) {
+ caps->addProfileLevel(
+ (uint32_t)sdkProfile, (uint32_t)sdkLevel);
+ gotProfileLevels = true;
+ } else if (!mapper) {
+ caps->addProfileLevel(pl.profile, pl.level);
+ gotProfileLevels = true;
+ }
+
+ // for H.263 also advertise the second highest level if the
+ // codec supports level 45, as level 45 only covers level 10
+ // TODO: move this to some form of a setting so it does not
+ // have to be here
+ if (mediaType == MIMETYPE_VIDEO_H263) {
+ C2Config::level_t nextLevel = C2Config::LEVEL_UNUSED;
+ for (C2Value::Primitive v : levelQuery[0].values.values) {
+ C2Config::level_t level =
+ (C2Config::level_t)v.ref<uint32_t>();
+ if (level < C2Config::LEVEL_H263_45
+ && level > nextLevel) {
+ nextLevel = level;
+ }
+ }
+ if (nextLevel != C2Config::LEVEL_UNUSED
+ && nextLevel != pl.level
+ && mapper
+ && mapper->mapProfile(pl.profile, &sdkProfile)
+ && mapper->mapLevel(nextLevel, &sdkLevel)) {
+ caps->addProfileLevel(
+ (uint32_t)sdkProfile, (uint32_t)sdkLevel);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Fallback defaults when the component did not report any
+ // profile/levels.
+ if (!gotProfileLevels) {
+ if (mediaType == MIMETYPE_VIDEO_VP9) {
+ if (encoder) {
+ caps->addProfileLevel(VP9Profile0, VP9Level41);
+ } else {
+ caps->addProfileLevel(VP9Profile0, VP9Level5);
+ caps->addProfileLevel(VP9Profile2, VP9Level5);
+ caps->addProfileLevel(VP9Profile2HDR, VP9Level5);
+ }
+ } else if (mediaType == MIMETYPE_VIDEO_HEVC && !encoder) {
+ caps->addProfileLevel(HEVCProfileMain, HEVCMainTierLevel51);
+ caps->addProfileLevel(HEVCProfileMainStill, HEVCMainTierLevel51);
+ } else if (mediaType == MIMETYPE_VIDEO_VP8) {
+ if (encoder) {
+ caps->addProfileLevel(VP8ProfileMain, VP8Level_Version0);
+ } else {
+ caps->addProfileLevel(VP8ProfileMain, VP8Level_Version0);
+ }
+ } else if (mediaType == MIMETYPE_VIDEO_AVC) {
+ if (encoder) {
+ caps->addProfileLevel(AVCProfileBaseline, AVCLevel41);
+// caps->addProfileLevel(AVCProfileConstrainedBaseline, AVCLevel41);
+ caps->addProfileLevel(AVCProfileMain, AVCLevel41);
+ } else {
+ caps->addProfileLevel(AVCProfileBaseline, AVCLevel52);
+ caps->addProfileLevel(AVCProfileConstrainedBaseline, AVCLevel52);
+ caps->addProfileLevel(AVCProfileMain, AVCLevel52);
+ caps->addProfileLevel(AVCProfileConstrainedHigh, AVCLevel52);
+ caps->addProfileLevel(AVCProfileHigh, AVCLevel52);
+ }
+ } else if (mediaType == MIMETYPE_VIDEO_MPEG4) {
+ if (encoder) {
+ caps->addProfileLevel(MPEG4ProfileSimple, MPEG4Level2);
+ } else {
+ caps->addProfileLevel(MPEG4ProfileSimple, MPEG4Level3);
+ }
+ } else if (mediaType == MIMETYPE_VIDEO_H263) {
+ if (encoder) {
+ caps->addProfileLevel(H263ProfileBaseline, H263Level45);
+ } else {
+ caps->addProfileLevel(H263ProfileBaseline, H263Level30);
+ caps->addProfileLevel(H263ProfileBaseline, H263Level45);
+ caps->addProfileLevel(H263ProfileISWV2, H263Level30);
+ caps->addProfileLevel(H263ProfileISWV2, H263Level45);
+ }
+ } else if (mediaType == MIMETYPE_VIDEO_MPEG2 && !encoder) {
+ caps->addProfileLevel(MPEG2ProfileSimple, MPEG2LevelHL);
+ caps->addProfileLevel(MPEG2ProfileMain, MPEG2LevelHL);
+ }
+ }
+
+ // TODO: get this from intf() as well, but how do we map them to
+ // MediaCodec color formats?
+ if (mediaType.find("video") != std::string::npos) {
+ // vendor video codecs prefer opaque format
+ if (trait.name.find("android") == std::string::npos) {
+ caps->addColorFormat(COLOR_FormatSurface);
+ }
+ caps->addColorFormat(COLOR_FormatYUV420Flexible);
+ caps->addColorFormat(COLOR_FormatYUV420Planar);
+ caps->addColorFormat(COLOR_FormatYUV420SemiPlanar);
+ caps->addColorFormat(COLOR_FormatYUV420PackedPlanar);
+ caps->addColorFormat(COLOR_FormatYUV420PackedSemiPlanar);
+ // framework video encoders must support surface format, though it is unclear
+ // that they will be able to map it if it is opaque
+ if (encoder && trait.name.find("android") != std::string::npos) {
+ caps->addColorFormat(COLOR_FormatSurface);
+ }
+ }
+ }
+ }
+ return OK;
+}
+
+} // namespace android
+
+// Factory entry point, exported with C linkage (unmangled symbol) —
+// presumably resolved dynamically by the codec list loader; confirm caller.
+// Ownership of the returned builder passes to the caller.
+extern "C" android::MediaCodecListBuilderBase *CreateBuilder() {
+ return new android::Codec2InfoBuilder;
+}
+
diff --git a/media/codec2/sfplugin/Codec2InfoBuilder.h b/media/codec2/sfplugin/Codec2InfoBuilder.h
new file mode 100644
index 0000000..30c189e
--- /dev/null
+++ b/media/codec2/sfplugin/Codec2InfoBuilder.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CODEC2_INFO_BUILDER_H_
+#define CODEC2_INFO_BUILDER_H_
+
+#include <media/stagefright/MediaCodecList.h>
+#include <utils/Errors.h>
+
+namespace android {
+
+/**
+ * MediaCodecListBuilderBase implementation that publishes Codec2 (and,
+ * transitionally, OMX) components to the media codec list.
+ */
+class Codec2InfoBuilder : public MediaCodecListBuilderBase {
+public:
+ Codec2InfoBuilder() = default;
+ ~Codec2InfoBuilder() override = default;
+ // Populates |writer| with available codec infos; always returns OK
+ // (failures are logged and the affected components skipped).
+ status_t buildMediaCodecList(MediaCodecListWriter* writer) override;
+};
+
+} // namespace android
+
+#endif // CODEC2_INFO_BUILDER_H_
diff --git a/media/codec2/sfplugin/InputSurfaceWrapper.h b/media/codec2/sfplugin/InputSurfaceWrapper.h
new file mode 100644
index 0000000..d9c4eec
--- /dev/null
+++ b/media/codec2/sfplugin/InputSurfaceWrapper.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2018, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INPUT_SURFACE_WRAPPER_H_
+#define INPUT_SURFACE_WRAPPER_H_
+
+#include <codec2/hidl/client.h>
+#include <system/graphics.h>
+
+namespace android {
+
+/**
+ * Wrapper interface around InputSurface.
+ */
+class InputSurfaceWrapper {
+public:
+ // Data space starts out unknown until setDataSpace() is called.
+ InputSurfaceWrapper()
+ : mDataSpace(HAL_DATASPACE_UNKNOWN) {
+ }
+
+ virtual ~InputSurfaceWrapper() = default;
+
+ /**
+ * Connect the surface with |comp|. A surface can
+ * connect to at most one component at a time.
+ *
+ * \return OK successfully connected to |comp|
+ * \return ALREADY_EXISTS already connected to another component.
+ */
+ virtual status_t connect(
+ const std::shared_ptr<Codec2Client::Component> &comp) = 0;
+
+ /**
+ * Disconnect the surface from the component if any.
+ */
+ virtual void disconnect() = 0;
+
+ /**
+ * Start pushing buffers to the surface.
+ *
+ * \return OK on success; implementation-specific error otherwise.
+ */
+ virtual status_t start() = 0;
+
+ /**
+ * Ref: GraphicBufferSource::signalEndOfInputStream.
+ */
+ virtual status_t signalEndOfInputStream() = 0;
+
+ /// Input Surface configuration
+ struct Config {
+ // IN PARAMS (GBS)
+ float mMinFps; // minimum fps (repeat frame to achieve this)
+ float mMaxFps; // max fps (via frame drop)
+ float mCaptureFps; // capture fps
+ float mCodedFps; // coded fps
+ bool mSuspended; // suspended
+ int64_t mTimeOffsetUs; // time offset (input => codec)
+ int64_t mSuspendAtUs; // suspend/resume time
+ int64_t mStartAtUs; // start time
+ bool mStopped; // stopped
+ int64_t mStopAtUs; // stop time
+
+ // OUT PARAMS (GBS)
+ int64_t mInputDelayUs; // delay between encoder input and surface input
+
+ // IN PARAMS (CODEC WRAPPER)
+ float mFixedAdjustedFps; // fixed fps via PTS manipulation
+ float mMinAdjustedFps; // minimum fps via PTS manipulation
+ };
+
+ /**
+ * Configures input surface.
+ *
+ * \param config configuration. This can be updated during this call to provide output
+ * parameters, but not to provide configured parameters (to avoid continually
+ * reconfiguring)
+ */
+ virtual status_t configure(Config &config) = 0;
+
+ /**
+ * Configures desired data space.
+ *
+ * \param dataSpace desired data space
+ */
+ inline void setDataSpace(android_dataspace dataSpace) {
+ mDataSpace = dataSpace;
+ }
+
+protected:
+ // Most recently configured data space (HAL_DATASPACE_UNKNOWN if unset).
+ android_dataspace mDataSpace;
+};
+
+} // namespace android
+
+#endif // INPUT_SURFACE_WRAPPER_H_
diff --git a/media/codec2/sfplugin/ReflectedParamUpdater.cpp b/media/codec2/sfplugin/ReflectedParamUpdater.cpp
new file mode 100644
index 0000000..880d4a5
--- /dev/null
+++ b/media/codec2/sfplugin/ReflectedParamUpdater.cpp
@@ -0,0 +1,575 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ReflectedParamUpdater"
+#include <utils/Log.h>
+
+#include <iostream>
+#include <set>
+#include <sstream>
+
+#include <C2Debug.h>
+#include <C2ParamInternal.h>
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/hexdump.h>
+
+#include "ReflectedParamUpdater.h"
+
+namespace android {
+
+/**
+ * Returns a human-readable dump of this dictionary — one entry per line,
+ * prefixed with the detected value type, all indented by |indent_| spaces.
+ */
+std::string ReflectedParamUpdater::Dict::debugString(size_t indent_) const {
+ std::string indent(indent_, ' ');
+ std::stringstream s;
+ s << "Dict {" << std::endl;
+
+ for (const auto &it : *this) {
+ s << indent << " ";
+
+ C2Value c2Value;
+ int32_t int32Value;
+ uint32_t uint32Value;
+ int64_t int64Value;
+ uint64_t uint64Value;
+ float floatValue;
+ sp<ABuffer> bufValue;
+ AString strValue;
+ // Probe the entry for each supported type, most specific first.
+ if (it.second.find(&c2Value)) {
+ switch (c2Value.type()) {
+ case C2Value::INT32:
+ (void)c2Value.get(&int32Value);
+ s << "c2::i32 " << it.first << " = " << int32Value;
+ break;
+ case C2Value::UINT32:
+ (void)c2Value.get(&uint32Value);
+ s << "c2::u32 " << it.first << " = " << uint32Value;
+ break;
+ case C2Value::CNTR32:
+ // dump counter value as unsigned
+ (void)c2Value.get((c2_cntr32_t*)&uint32Value);
+ s << "c2::c32 " << it.first << " = " << uint32Value;
+ break;
+ case C2Value::INT64:
+ (void)c2Value.get(&int64Value);
+ s << "c2::i64 " << it.first << " = " << int64Value;
+ break;
+ case C2Value::UINT64:
+ (void)c2Value.get(&uint64Value);
+ s << "c2::u64 " << it.first << " = " << uint64Value;
+ break;
+ case C2Value::CNTR64:
+ // dump counter value as unsigned
+ (void)c2Value.get((c2_cntr64_t*)&uint64Value);
+ s << "c2::c64 " << it.first << " = " << uint64Value;
+ break;
+ case C2Value::FLOAT:
+ (void)c2Value.get(&floatValue);
+ s << "c2::float " << it.first << " = " << floatValue;
+ break;
+ default:
+ // dump unsupported values for debugging, these should not be used
+ s << "c2::unsupported " << it.first;
+ }
+ } else if (it.second.find(&int32Value)) {
+ s << "int32_t " << it.first << " = " << int32Value;
+ } else if (it.second.find(&int64Value)) {
+ s << "int64_t " << it.first << " = " << int64Value;
+ } else if (it.second.find(&strValue)) {
+ s << "string " << it.first << " = \"" << strValue.c_str() << "\"";
+ } else if (it.second.find(&bufValue)) {
+ s << "Buffer " << it.first << " = ";
+ // Hexdump only small (<= 64 byte) buffers; otherwise print the pointer.
+ if (bufValue != nullptr && bufValue->data() != nullptr && bufValue->size() <= 64) {
+ s << "{" << std::endl;
+ AString tmp;
+ hexdump(bufValue->data(), bufValue->size(), indent_ + 4, &tmp);
+ s << tmp.c_str() << indent << " }";
+ } else {
+ s << (void*)bufValue.get();
+ }
+ } else {
+ // dump unsupported values for debugging, this should never happen.
+ s << "unsupported " << it.first;
+ }
+ s << std::endl;
+ }
+ s << indent << "}";
+
+ return s.str();
+}
+
+/**
+ * Registers the fields of all of |paramDescs| (described via |reflector|) so
+ * they can be addressed by name. Vendor parameters are registered twice:
+ * once with the "vendor." prefix and (temporarily) once without.
+ *
+ * Note: the parameter was previously spelled "¶mDescs" due to an
+ * HTML-entity encoding corruption of "&paramDescs"; restored here.
+ */
+void ReflectedParamUpdater::addParamDesc(
+ const std::shared_ptr<C2ParamReflector> &reflector,
+ const std::vector<std::shared_ptr<C2ParamDescriptor>> &paramDescs) {
+ for (const std::shared_ptr<C2ParamDescriptor> &desc : paramDescs) {
+ std::unique_ptr<C2StructDescriptor> structDesc = reflector->describe(
+ desc->index().coreIndex());
+ if (structDesc == nullptr) {
+ ALOGD("Could not describe %s", desc->name().c_str());
+ continue;
+ }
+ addParamDesc(desc, *structDesc, reflector, true /* markVendor */);
+ }
+
+ // TEMP: also add vendor parameters as non-vendor
+ for (const std::shared_ptr<C2ParamDescriptor> &desc : paramDescs) {
+ if (!desc->index().isVendor()) {
+ continue;
+ }
+ std::unique_ptr<C2StructDescriptor> structDesc = reflector->describe(
+ desc->index().coreIndex());
+ if (structDesc) {
+ addParamDesc(desc, *structDesc, reflector, false /* markVendor */);
+ }
+ }
+}
+
+/**
+ * Recursively registers the fields of |structDesc| under |path| — a field is
+ * addressable as "<path>.<fieldName>". |offset| is the byte offset of this
+ * struct within the containing parameter. Nested single-element structs are
+ * recursed into via |reflector|; fields with unsupported types or extents
+ * are skipped with a debug log.
+ */
+void ReflectedParamUpdater::addParamStructDesc(
+ std::shared_ptr<C2ParamDescriptor> desc,
+ C2String path,
+ size_t offset,
+ const C2StructDescriptor &structDesc,
+ const std::shared_ptr<C2ParamReflector> &reflector) {
+ for (auto it = structDesc.begin(); it != structDesc.end(); ++it) {
+ C2String fieldName = path + "." + it->name();
+ if (it->type() & C2FieldDescriptor::STRUCT_FLAG) {
+ // Only single-element nested structs are supported.
+ if (reflector == nullptr || it->extent() != 1) {
+ ALOGD("ignored struct field %s", fieldName.c_str());
+ continue;
+ }
+ std::unique_ptr<C2StructDescriptor> structDesc_ = reflector->describe(
+ C2Param::CoreIndex(it->type()).coreIndex());
+ if (structDesc_ == nullptr) {
+ ALOGD("Could not describe structure of %s", fieldName.c_str());
+ continue;
+ }
+ addParamStructDesc(desc, fieldName, offset + _C2ParamInspector::GetOffset(*it),
+ *structDesc_, reflector);
+ continue;
+ }
+
+ // verify extent and type
+ switch (it->type()) {
+ case C2FieldDescriptor::INT32:
+ case C2FieldDescriptor::UINT32:
+ case C2FieldDescriptor::CNTR32:
+ case C2FieldDescriptor::INT64:
+ case C2FieldDescriptor::UINT64:
+ case C2FieldDescriptor::CNTR64:
+ case C2FieldDescriptor::FLOAT:
+ if (it->extent() != 1) {
+ ALOGD("extent() != 1 for single value type: %s", fieldName.c_str());
+ continue;
+ }
+ break;
+ case C2FieldDescriptor::STRING:
+ case C2FieldDescriptor::BLOB:
+ break;
+
+ default:
+ ALOGD("Unrecognized type: %s", fieldName.c_str());
+ continue;
+ }
+
+ ALOGV("%s registered", fieldName.c_str());
+ // TODO: get the proper size by iterating through the fields.
+ // only insert fields the very first time
+ mMap.emplace(fieldName, FieldDesc {
+ desc,
+ std::make_unique<C2FieldDescriptor>(
+ it->type(), it->extent(), it->name(),
+ _C2ParamInspector::GetOffset(*it),
+ _C2ParamInspector::GetSize(*it)),
+ offset,
+ });
+ }
+}
+
+/**
+ * Registers a single parameter: records its display name (prefixed with
+ * "vendor." for vendor params when |markVendor|), then either registers it
+ * for whole-parameter binary updates (if opted in via supportWholeParam) or
+ * registers its individual fields.
+ */
+void ReflectedParamUpdater::addParamDesc(
+ std::shared_ptr<C2ParamDescriptor> desc, const C2StructDescriptor &structDesc,
+ const std::shared_ptr<C2ParamReflector> &reflector, bool markVendor) {
+ C2String paramName = desc->name();
+
+ // prefix vendor parameters
+ if (desc->index().isVendor() && markVendor) {
+ paramName = "vendor." + paramName;
+ }
+ mParamNames.emplace(desc->index(), paramName);
+
+ // also allow setting whole parameters in a binary fashion via ByteBuffer
+ // this is opt-in for now
+ auto it = mWholeParams.find(paramName);
+ if (it != mWholeParams.end() && it->second.coreIndex() == desc->index().coreIndex()) {
+ // A null field descriptor marks a whole-parameter entry.
+ mMap.emplace(paramName, FieldDesc{ desc, nullptr, 0 /* offset */ });
+ // don't add fields of whole parameters.
+ return;
+ }
+
+ addParamStructDesc(desc, paramName, 0 /* offset */, structDesc, reflector);
+}
+
+void ReflectedParamUpdater::supportWholeParam(std::string name, C2Param::CoreIndex index) {
+ // Opt |name| in for whole-parameter (binary) updates.
+ mWholeParams.insert({std::move(name), index});
+}
+
+std::string ReflectedParamUpdater::getParamName(C2Param::Index index) const {
+ // Unregistered indices get a "<unknown N>" placeholder.
+ auto found = mParamNames.find(index);
+ if (found == mParamNames.end()) {
+ std::stringstream placeholder;
+ placeholder << "<unknown " << index << ">";
+ return placeholder.str();
+ }
+ return found->second;
+}
+
+// Collects the parameter indices referenced by the entries of |params|.
+// An ordered set is used to de-duplicate indices shared by multiple fields
+// of the same parameter.
+void ReflectedParamUpdater::getParamIndicesFromMessage(
+        const Dict &params,
+        std::vector<C2Param::Index> *vec /* nonnull */) const {
+    CHECK(vec != nullptr);
+    std::set<C2Param::Index> indices;
+    parseMessageAndDoWork(
+            params,
+            [&indices](const std::string &, const FieldDesc &desc, const void *, size_t) {
+                indices.insert(desc.paramDesc->index());
+            });
+    vec->assign(indices.begin(), indices.end());
+}
+
+// Collects the (unique) parameter indices of every registered field whose
+// name appears in |keys|.
+void ReflectedParamUpdater::getParamIndicesForKeys(
+        const std::vector<std::string> &keys,
+        std::vector<C2Param::Index> *vec /* nonnull */) const {
+    CHECK(vec != nullptr);
+
+    // de-duplicate the requested keys up front
+    std::set<std::string> wanted(keys.begin(), keys.end());
+
+    ALOGV("in getParamIndicesForKeys with %zu keys and map of %zu entries",
+            wanted.size(), mMap.size());
+
+    std::set<C2Param::Index> indices;
+    for (const auto &entry : mMap) {
+        ALOGV("count of %s is %zu", entry.first.c_str(), wanted.count(entry.first));
+        if (wanted.count(entry.first) > 0) {
+            indices.insert(entry.second.paramDesc->index());
+        }
+    }
+
+    vec->assign(indices.begin(), indices.end());
+}
+
+// Applies the name/value pairs in |params| to the matching C2Param objects in
+// |vec|. Missing params are not created. Flexible params (extent 0) and
+// whole-param updates are reallocated or trimmed to fit the new payload.
+void ReflectedParamUpdater::updateParamsFromMessage(
+        const Dict &params,
+        std::vector<std::unique_ptr<C2Param>> *vec /* nonnull */) const {
+    CHECK(vec != nullptr);
+
+    // index the valid params by index for the worker lambda below
+    std::map<C2Param::Index, std::unique_ptr<C2Param>*> paramsMap;
+    for (std::unique_ptr<C2Param> &param : *vec) {
+        if (param && *param) {
+            paramsMap[param->index()] = &param;
+        }
+    }
+
+    parseMessageAndDoWork(
+            params,
+            [&paramsMap](const std::string &name, const FieldDesc &desc, const void *ptr, size_t size) {
+        std::unique_ptr<C2Param> *param = nullptr;
+        auto paramIt = paramsMap.find(desc.paramDesc->index());
+        if (paramIt == paramsMap.end()) {
+            ALOGD("%s found, but param #%d isn't present to update",
+                    name.c_str(), (int32_t)desc.paramDesc->index());
+            return;
+        }
+        param = paramIt->second;
+
+        // local subclass used to reach the C2Param(size, index) constructor
+        struct _C2Param : public C2Param {
+            using C2Param::C2Param;
+            _C2Param(uint32_t size, uint32_t index) : C2Param(size, index) { }
+        };
+
+        // we will handle whole param updates as part of a flexible param update using
+        // a zero offset.
+        size_t offset = 0;
+        size_t minOffset = 0;
+
+        // if this descriptor has a field, use the offset and size and ensure that offset
+        // is not part of the header
+        if (desc.fieldDesc) {
+            minOffset = sizeof(C2Param);
+            offset = sizeof(C2Param) + desc.offset
+                    + _C2ParamInspector::GetOffset(*desc.fieldDesc);
+        }
+
+        // reallocate or trim flexible param (or whole param) as necessary
+        if (!desc.fieldDesc /* whole param */ || desc.fieldDesc->extent() == 0) {
+            // reallocate param if more space is needed
+            if (param->get()->size() < offset + size) {
+                if (size > INT32_MAX - offset || offset < minOffset) {
+                    // size too long or offset too early - abandon
+                    return;
+                }
+                // allocate the larger param, copy the existing payload
+                // (everything past the header, up to |offset|) and swap it in
+                C2Param *newParam = (C2Param *)::operator new(offset + size);
+                new (newParam) _C2Param(offset + size, param->get()->index());
+                if (offset > sizeof(C2Param)) {
+                    memcpy(newParam + 1, param->get() + 1, offset - sizeof(C2Param));
+                }
+                param->reset(newParam);
+            } else if (param->get()->size() > offset + size) {
+                // trim parameter size
+                _C2ParamInspector::TrimParam(param->get(), offset + size);
+            }
+        } else if (desc.fieldDesc->type() == C2FieldDescriptor::BLOB) {
+            // zero fill blobs if updating with smaller blob
+            if (desc.fieldDesc->extent() > size) {
+                memset((uint8_t *)(param->get()) + offset + size, 0,
+                        desc.fieldDesc->extent() - size);
+            }
+        }
+
+        // finally, copy the new payload into place
+        memcpy((uint8_t *)(param->get()) + offset, ptr, size);
+    });
+}
+
+// Walks every registered field, looks up its name in |params|, converts the
+// supplied value to the field's wire representation, and invokes |work| with
+// a pointer to the raw bytes and their size. Entries in |params| whose value
+// type cannot be converted to the field's type are silently skipped.
+void ReflectedParamUpdater::parseMessageAndDoWork(
+        const Dict &params,
+        std::function<void(const std::string &, const FieldDesc &, const void *, size_t)> work) const {
+    for (const std::pair<const std::string, FieldDesc> &kv : mMap) {
+        const std::string &name = kv.first;
+        const FieldDesc &desc = kv.second;
+        auto param = params.find(name);
+        if (param == params.end()) {
+            continue;
+        }
+
+        // handle whole parameters
+        if (!desc.fieldDesc) {
+            sp<ABuffer> tmp;
+            // whole params are only settable from a binary blob whose embedded
+            // param type matches the registered parameter's type
+            if (param->second.find(&tmp) && tmp != nullptr) {
+                C2Param *tmpAsParam = C2Param::From(tmp->data(), tmp->size());
+                if (tmpAsParam && tmpAsParam->type().type() == desc.paramDesc->index().type()) {
+                    work(name, desc, tmp->data(), tmp->size());
+                } else {
+                    ALOGD("Param blob does not match param for '%s' (%p, %x vs %x)",
+                            name.c_str(), tmpAsParam, tmpAsParam ? tmpAsParam->type().type() : 0xDEADu,
+                            desc.paramDesc->index().type());
+                }
+            }
+            continue;
+        }
+
+        // scratch storage shared by the 32-bit and 64-bit arithmetic cases
+        int32_t int32Value;
+        int64_t int64Value;
+        C2Value c2Value;
+
+        C2FieldDescriptor::type_t fieldType = desc.fieldDesc->type();
+        size_t fieldExtent = desc.fieldDesc->extent();
+        switch (fieldType) {
+            // arithmetic fields accept either a typed C2Value or a plain
+            // int32/int64 (the latter mainly for vendor parameters set via SDK)
+            case C2FieldDescriptor::INT32:
+                if ((param->second.find(&c2Value) && c2Value.get(&int32Value))
+                        || param->second.find(&int32Value)) {
+                    work(name, desc, &int32Value, sizeof(int32Value));
+                }
+                break;
+            case C2FieldDescriptor::UINT32:
+                if ((param->second.find(&c2Value) && c2Value.get((uint32_t*)&int32Value))
+                        || param->second.find(&int32Value)) {
+                    work(name, desc, &int32Value, sizeof(int32Value));
+                }
+                break;
+            case C2FieldDescriptor::CNTR32:
+                if ((param->second.find(&c2Value) && c2Value.get((c2_cntr32_t*)&int32Value))
+                        || param->second.find(&int32Value)) {
+                    work(name, desc, &int32Value, sizeof(int32Value));
+                }
+                break;
+            case C2FieldDescriptor::INT64:
+                if ((param->second.find(&c2Value) && c2Value.get(&int64Value))
+                        || param->second.find(&int64Value)) {
+                    work(name, desc, &int64Value, sizeof(int64Value));
+                }
+                break;
+            case C2FieldDescriptor::UINT64:
+                if ((param->second.find(&c2Value) && c2Value.get((uint64_t*)&int64Value))
+                        || param->second.find(&int64Value)) {
+                    work(name, desc, &int64Value, sizeof(int64Value));
+                }
+                break;
+            case C2FieldDescriptor::CNTR64:
+                if ((param->second.find(&c2Value) && c2Value.get((c2_cntr64_t*)&int64Value))
+                        || param->second.find(&int64Value)) {
+                    work(name, desc, &int64Value, sizeof(int64Value));
+                }
+                break;
+            case C2FieldDescriptor::FLOAT: {
+                // floats are only accepted as typed C2Values
+                float tmp;
+                if (param->second.find(&c2Value) && c2Value.get(&tmp)) {
+                    work(name, desc, &tmp, sizeof(tmp));
+                }
+                break;
+            }
+            case C2FieldDescriptor::STRING: {
+                AString tmp;
+                if (!param->second.find(&tmp)) {
+                    break;
+                }
+                // fixed-extent strings must fit including the terminator
+                if (fieldExtent > 0 && tmp.size() >= fieldExtent) {
+                    AString truncated(tmp, 0, fieldExtent - 1);
+                    ALOGD("String value too long to fit: original \"%s\" truncated to \"%s\"",
+                            tmp.c_str(), truncated.c_str());
+                    tmp = truncated;
+                }
+                // size includes the null terminator
+                work(name, desc, tmp.c_str(), tmp.size() + 1);
+                break;
+            }
+
+            case C2FieldDescriptor::BLOB: {
+                sp<ABuffer> tmp;
+                if (!param->second.find(&tmp) || tmp == nullptr) {
+                    break;
+                }
+
+                // fixed-extent blobs are truncated in place to fit
+                if (fieldExtent > 0 && tmp->size() > fieldExtent) {
+                    ALOGD("Blob value too long to fit. Truncating.");
+                    tmp->setRange(tmp->offset(), fieldExtent);
+                }
+                work(name, desc, tmp->data(), tmp->size());
+                break;
+            }
+
+            default:
+                ALOGD("Unsupported data type for %s", name.c_str());
+                break;
+        }
+    }
+}
+
+// Convenience overload: borrows raw pointers from the owning vector and
+// delegates to the raw-pointer overload.
+ReflectedParamUpdater::Dict
+ReflectedParamUpdater::getParams(const std::vector<std::unique_ptr<C2Param>> &params_) const {
+    std::vector<C2Param*> raw;
+    raw.reserve(params_.size());
+    for (const std::unique_ptr<C2Param> &p : params_) {
+        raw.push_back(p.get());
+    }
+    return getParams(raw);
+}
+
+// Reads the registered fields out of |params| and returns them as a
+// name -> Value dictionary. Null/invalid or unregistered params are skipped.
+ReflectedParamUpdater::Dict
+ReflectedParamUpdater::getParams(const std::vector<C2Param*> &params) const {
+    Dict ret;
+
+    // convert vector to map
+    std::map<C2Param::Index, C2Param *> paramsMap;
+    for (C2Param *param : params) {
+        if (param != nullptr && *param) {
+            paramsMap[param->index()] = param;
+        }
+    }
+
+    for (const std::pair<const std::string, FieldDesc> &kv : mMap) {
+        const std::string &name = kv.first;
+        const FieldDesc &desc = kv.second;
+        if (paramsMap.count(desc.paramDesc->index()) == 0) {
+            continue;
+        }
+        C2Param *param = paramsMap[desc.paramDesc->index()];
+        Value value;
+
+        // handle whole params first
+        if (!desc.fieldDesc) {
+            // whole params are exposed as a binary blob copy
+            sp<ABuffer> buf = ABuffer::CreateAsCopy(param, param->size());
+            value.set(buf);
+            ret.emplace(name, value);
+            continue;
+        }
+
+        // locate the field's payload within the parameter
+        size_t offset = sizeof(C2Param) + desc.offset
+                + _C2ParamInspector::GetOffset(*desc.fieldDesc);
+        uint8_t *data = (uint8_t *)param + offset;
+        C2FieldDescriptor::type_t fieldType = desc.fieldDesc->type();
+        switch (fieldType) {
+            case C2FieldDescriptor::STRING: {
+                // extent 0 means a flexible (param-sized) string
+                size_t length = desc.fieldDesc->extent();
+                if (length == 0) {
+                    length = param->size() - offset;
+                }
+
+                // overflow-safe bounds check: offset + length <= param->size()
+                if (param->size() < length || param->size() - length < offset) {
+                    ALOGD("param too small for string: length %zu size %zu offset %zu",
+                            length, param->size(), offset);
+                    break;
+                }
+                value.set(AString((char *)data, strnlen((char *)data, length)));
+                break;
+            }
+
+            case C2FieldDescriptor::BLOB: {
+                // extent 0 means a flexible (param-sized) blob
+                size_t length = desc.fieldDesc->extent();
+                if (length == 0) {
+                    length = param->size() - offset;
+                }
+
+                // overflow-safe bounds check: offset + length <= param->size()
+                if (param->size() < length || param->size() - length < offset) {
+                    ALOGD("param too small for blob: length %zu size %zu offset %zu",
+                            length, param->size(), offset);
+                    break;
+                }
+
+                sp<ABuffer> buf = ABuffer::CreateAsCopy(data, length);
+                value.set(buf);
+                break;
+            }
+
+            default: {
+                // arithmetic fields are returned as typed C2Values
+                size_t valueSize = C2Value::SizeFor((C2Value::type_t)fieldType);
+                if (param->size() < valueSize || param->size() - valueSize < offset) {
+                    ALOGD("param too small for c2value: size %zu offset %zu",
+                            param->size(), offset);
+                    break;
+                }
+
+                C2Value c2Value;
+                switch (fieldType) {
+                    case C2FieldDescriptor::INT32:  c2Value = *((int32_t *)data); break;
+                    case C2FieldDescriptor::UINT32: c2Value = *((uint32_t *)data); break;
+                    case C2FieldDescriptor::CNTR32: c2Value = *((c2_cntr32_t *)data); break;
+                    case C2FieldDescriptor::INT64:  c2Value = *((int64_t *)data); break;
+                    case C2FieldDescriptor::UINT64: c2Value = *((uint64_t *)data); break;
+                    case C2FieldDescriptor::CNTR64: c2Value = *((c2_cntr64_t *)data); break;
+                    case C2FieldDescriptor::FLOAT:  c2Value = *((float *)data); break;
+                    default:
+                        // skips the emplace below for unsupported types
+                        ALOGD("Unsupported data type for %s", name.c_str());
+                        continue;
+                }
+                value.set(c2Value);
+            }
+        }
+        // note: an empty Value is emplaced if a bounds check above failed
+        ret.emplace(name, value);
+    }
+    return ret;
+}
+
+// Forgets all registered field/param entries. The index->name map and the
+// whole-param opt-ins (mParamNames, mWholeParams) are intentionally retained.
+void ReflectedParamUpdater::clear() {
+    mMap.clear();
+}
+
+} // namespace android
diff --git a/media/codec2/sfplugin/ReflectedParamUpdater.h b/media/codec2/sfplugin/ReflectedParamUpdater.h
new file mode 100644
index 0000000..5436ba5
--- /dev/null
+++ b/media/codec2/sfplugin/ReflectedParamUpdater.h
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef REFLECTED_PARAM_BUILDER_H_
+#define REFLECTED_PARAM_BUILDER_H_
+
+#include <map>
+#include <memory>
+
+#include <C2.h>
+#include <C2Param.h>
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AData.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/AString.h>
+
+namespace android {
+
+/**
+ * Utility class to query and update Codec 2.0 configuration values. Use custom dictionary as
+ * AMessage cannot represent all types of Codec 2.0 parameters and C2Value cannot represent
+ * all types of SDK values. We want to be precise when setting standard parameters (use C2Value
+ * for arithmetic values), but also support int32 and int64 for SDK values specifically for
+ * vendor parameters (as SDK API does not allow specifying proper type.) When querying fields,
+ * we can use C2Values as they are defined.
+ *
+ * Item => Codec 2.0 value mappings:
+ *   C2Value::type => type
+ *   int32 => int32, cntr32 or uint32
+ *   int64 => int64, cntr64 or uint64
+ *   AString => string
+ *   ABuffer => blob
+ *   'Rect' => C2RectStruct (not exposed in SDK as a rectangle)
+ */
+class ReflectedParamUpdater {
+public:
+    ReflectedParamUpdater() = default;
+    ~ReflectedParamUpdater() = default;
+
+    /**
+     * Element for values
+     */
+    struct Value : public AData<C2Value, int32_t, int64_t, AString, sp<ABuffer>>::Basic {
+        // allow construction from base types
+        Value() = default;
+        explicit Value(C2Value i) { set(i); }
+        explicit Value(int32_t i) { set(i); }
+        explicit Value(int64_t i) { set(i); }
+        explicit Value(const AString &i) { set(i); }
+        explicit Value(const sp<ABuffer> &i) { set(i); }
+    };
+
+    /**
+     * String-keyed dictionary of Values.
+     */
+    struct Dict : public std::map<std::string, Value> {
+        Dict() = default;
+        std::string debugString(size_t indent = 0) const;
+    };
+
+    /**
+     * Enumerates all fields of the parameter descriptors supplied, so that this object can later
+     * query and update these.
+     *
+     * For now only first-level fields are supported. Also, array fields are not supported.
+     *
+     * \param reflector  C2ParamReflector object for C2Param reflection.
+     * \param paramDescs vector of C2ParamDescriptor objects that this object
+     *                   would recognize when building params.
+     */
+    void addParamDesc(
+            const std::shared_ptr<C2ParamReflector> &reflector,
+            const std::vector<std::shared_ptr<C2ParamDescriptor>> &paramDescs);
+
+    /**
+     * Adds fields of a standard parameter (that may not be supported by the parameter reflector
+     * or may not be listed as a supported value by the component). If the parameter name is
+     * used for another parameter, this operation is a no-op. (Technically, this is by fields).
+     *
+     * \param T    standard parameter type
+     * \param name parameter name
+     */
+    template<typename T>
+    void addStandardParam(const std::string &name, C2ParamDescriptor::attrib_t attrib =
+            C2ParamDescriptor::IS_READ_ONLY) {
+        addParamDesc(std::make_shared<C2ParamDescriptor>(
+                C2Param::Index(T::PARAM_TYPE), attrib, name.c_str()),
+                C2StructDescriptor((T*)nullptr), nullptr /* descriptor */);
+    }
+
+    /**
+     * Adds fields of a structure (or a parameter) described by the struct descriptor. If
+     * reflector is provided, fields of sub-structures are also added. Otherwise, only top-level
+     * fundamental typed fields (arithmetic, string and blob) are added.
+     *
+     * \param paramDesc  parameter descriptor
+     * \param path       path/name of the structure (field or parent parameter)
+     * \param offset     offset of the structure in the parameter
+     * \param structDesc struct descriptor for the structure's fields
+     * \param reflector  C2ParamReflector object for C2Param reflection (may be null)
+     */
+    void addParamStructDesc(
+            std::shared_ptr<C2ParamDescriptor> paramDesc, C2String path, size_t offset,
+            const C2StructDescriptor &structDesc,
+            const std::shared_ptr<C2ParamReflector> &reflector);
+
+    /**
+     * Adds fields of a parameter described by the struct descriptor. If reflector is provided,
+     * fields of sub-structures are also added. Otherwise, only top-level fundamental typed fields
+     * (arithmetic, string and blob) are added.
+     *
+     * \param paramDesc  parameter descriptor
+     * \param structDesc struct descriptor for the parameter's fields
+     * \param reflector  C2ParamReflector object for C2Param reflection (may be null)
+     * \param markVendor TEMP if true, prefix vendor parameter names with "vendor."
+     */
+    void addParamDesc(
+            std::shared_ptr<C2ParamDescriptor> paramDesc, const C2StructDescriptor &structDesc,
+            const std::shared_ptr<C2ParamReflector> &reflector,
+            bool markVendor = true);
+
+    /**
+     * Add support for setting a parameter as a binary blob.
+     *
+     * \param name      name of the parameter
+     * \param coreIndex parameter (core) index
+     */
+    void supportWholeParam(std::string name, C2Param::CoreIndex coreIndex);
+
+    /**
+     * Returns the name of the parameter for an index.
+     */
+    std::string getParamName(C2Param::Index index) const;
+
+    /**
+     * Get list of param indices from field names and values in AMessage object.
+     *
+     * TODO: This should be in the order that they are listed by the component.
+     *
+     * \param params[in] Dict object with field name to value pairs.
+     * \param vec[out]   vector to store the indices from |params|.
+     */
+    void getParamIndicesFromMessage(
+            const Dict &params,
+            std::vector<C2Param::Index> *vec /* nonnull */) const;
+
+    /**
+     * Get list of param indices from field names (only) in AMessage object.
+     *
+     * \param keys[in] Vector object with field names.
+     * \param vec[out] vector to store the indices for |keys|.
+     */
+    void getParamIndicesForKeys(
+            const std::vector<std::string> &keys,
+            std::vector<C2Param::Index> *vec /* nonnull */) const;
+
+    /**
+     * Update C2Param objects from field name and value in AMessage object.
+     *
+     * \param params[in]   Dict object with field name to value pairs.
+     * \param vec[in,out]  vector of the C2Param objects to be updated.
+     */
+    void updateParamsFromMessage(
+            const Dict &params,
+            std::vector<std::unique_ptr<C2Param>> *vec /* nonnull */) const;
+
+    /**
+     * Get fields from C2Param objects in AMessage object.
+     *
+     * \param params[in] vector of the C2Param objects to be queried
+     * \return a Dict object containing the known parameters
+     */
+    Dict getParams(
+            const std::vector<C2Param*> &params /* nonnull */) const;
+
+    Dict getParams(
+            const std::vector<std::unique_ptr<C2Param>> &params /* nonnull */) const;
+
+    /**
+     * Clear param descriptors in this object.
+     */
+    void clear();
+
+private:
+    struct FieldDesc {
+        // descriptor of the parameter this field belongs to
+        std::shared_ptr<C2ParamDescriptor> paramDesc;
+        // field descriptor, or nullptr for a whole-parameter (binary) entry
+        std::unique_ptr<C2FieldDescriptor> fieldDesc;
+        // offset of the containing structure within the parameter payload
+        size_t offset;
+    };
+    std::map<std::string, FieldDesc> mMap;                   // field name -> descriptor
+    std::map<C2Param::Index, std::string> mParamNames;       // param index -> exposed name
+    std::map<std::string, C2Param::CoreIndex> mWholeParams;  // names opted into whole-param updates
+
+    // Shared worker: converts each matching Dict entry to the field's wire
+    // representation and calls |work| with the raw bytes.
+    void parseMessageAndDoWork(
+            const Dict &params,
+            std::function<void(const std::string &, const FieldDesc &, const void *, size_t)> work) const;
+
+    C2_DO_NOT_COPY(ReflectedParamUpdater);
+};
+
+} // namespace android
+
+#endif // REFLECTED_PARAM_BUILDER_H_
diff --git a/media/codec2/sfplugin/SkipCutBuffer.cpp b/media/codec2/sfplugin/SkipCutBuffer.cpp
new file mode 100644
index 0000000..5762440
--- /dev/null
+++ b/media/codec2/sfplugin/SkipCutBuffer.cpp
@@ -0,0 +1,206 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SkipCutBuffer"
+#include <utils/Log.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/SkipCutBuffer.h>
+
+namespace android {
+
+// 'skip' and 'cut' are counted in frames; a frame is num16BitChannels
+// 16-bit samples. On invalid or overflow-prone arguments the object is left
+// in passthrough mode (mCutBuffer == nullptr) and submit() becomes a no-op.
+SkipCutBuffer::SkipCutBuffer(size_t skip, size_t cut, size_t num16BitChannels) {
+
+    mWriteHead = 0;
+    mReadHead = 0;
+    mCapacity = 0;
+    mCutBuffer = nullptr;
+    // also initialize these on the passthrough early-return paths below so
+    // that clear() never reads an uninitialized mSkip
+    mSkip = 0;
+    mFrontPadding = 0;
+    mBackPadding = 0;
+
+    if (num16BitChannels == 0 || num16BitChannels > INT32_MAX / 2) {
+        ALOGW("# channels out of range: %zu, using passthrough instead", num16BitChannels);
+        return;
+    }
+    size_t frameSize = num16BitChannels * 2;  // bytes per frame
+    // guard the frame-size multiplications and the +4096 capacity headroom
+    // below against int32 overflow
+    if (skip > INT32_MAX / frameSize || cut > INT32_MAX / frameSize
+            || cut * frameSize > INT32_MAX - 4096) {
+        ALOGW("out of range skip/cut: %zu/%zu, using passthrough instead",
+                skip, cut);
+        return;
+    }
+    // convert frames to bytes
+    skip *= frameSize;
+    cut *= frameSize;
+
+    mFrontPadding = mSkip = skip;
+    mBackPadding = cut;
+    // the ring buffer must hold the cut tail plus headroom; write() grows it
+    // on demand if a submitted buffer does not fit
+    mCapacity = cut + 4096;
+    mCutBuffer = new (std::nothrow) char[mCapacity];
+    ALOGV("skipcutbuffer %zu %zu %d", skip, cut, mCapacity);
+}
+
+SkipCutBuffer::~SkipCutBuffer() {
+    // mCutBuffer may be nullptr (passthrough mode); delete[] handles that
+    delete[] mCutBuffer;
+}
+
+// Runs one MediaBuffer through the skip/cut machinery in place: bytes still
+// owed to the initial skip are dropped, the remainder is staged in the ring
+// buffer, and the buffer is then refilled from the ring buffer while
+// withholding the final mBackPadding bytes.
+void SkipCutBuffer::submit(MediaBuffer *buffer) {
+    if (mCutBuffer == nullptr) {
+        // passthrough mode
+        return;
+    }
+
+    int32_t offset = buffer->range_offset();
+    int32_t buflen = buffer->range_length();
+
+    // drop the initial data from the buffer if needed
+    if (mFrontPadding > 0) {
+        // still data left to drop
+        int32_t to_drop = (buflen < mFrontPadding) ? buflen : mFrontPadding;
+        offset += to_drop;
+        buflen -= to_drop;
+        buffer->set_range(offset, buflen);
+        mFrontPadding -= to_drop;
+    }
+
+
+    // append data to cutbuffer
+    char *src = ((char*) buffer->data()) + offset;
+    write(src, buflen);
+
+
+    // the mediabuffer is now empty. Fill it from cutbuffer, always leaving
+    // at least mBackPadding bytes in the cutbuffer
+    char *dst = (char*) buffer->data();
+    size_t copied = read(dst, buffer->size());
+    buffer->set_range(0, copied);
+}
+
+// Shared implementation for the ABuffer/MediaCodecBuffer submit() overloads.
+// T must provide offset()/size()/setRange()/data()/base()/capacity().
+template <typename T>
+void SkipCutBuffer::submitInternal(const sp<T>& buffer) {
+    if (mCutBuffer == nullptr) {
+        // passthrough mode
+        return;
+    }
+
+    int32_t offset = buffer->offset();
+    int32_t buflen = buffer->size();
+
+    // drop the initial data from the buffer if needed
+    if (mFrontPadding > 0) {
+        // still data left to drop
+        int32_t to_drop = (buflen < mFrontPadding) ? buflen : mFrontPadding;
+        offset += to_drop;
+        buflen -= to_drop;
+        buffer->setRange(offset, buflen);
+        mFrontPadding -= to_drop;
+    }
+
+
+    // append data to cutbuffer
+    char *src = (char*) buffer->data();
+    write(src, buflen);
+
+
+    // the mediabuffer is now empty. Fill it from cutbuffer, always leaving
+    // at least mBackPadding bytes in the cutbuffer
+    char *dst = (char*) buffer->base();
+    size_t copied = read(dst, buffer->capacity());
+    buffer->setRange(0, copied);
+}
+
+// ABuffer flavor; forwards to the shared template implementation.
+void SkipCutBuffer::submit(const sp<ABuffer>& buffer) {
+    submitInternal(buffer);
+}
+
+// MediaCodecBuffer flavor; forwards to the shared template implementation.
+void SkipCutBuffer::submit(const sp<MediaCodecBuffer>& buffer) {
+    submitInternal(buffer);
+}
+
+// Discards all buffered data and re-arms the initial skip amount.
+void SkipCutBuffer::clear() {
+    mWriteHead = mReadHead = 0;
+    mFrontPadding = mSkip;
+}
+
+// Appends |num| bytes to the internal ring buffer, growing it when the data
+// would not fit (a 32-byte gap is kept so head equality always means empty).
+void SkipCutBuffer::write(const char *src, size_t num) {
+    int32_t sizeused = (mWriteHead - mReadHead);
+    if (sizeused < 0) sizeused += mCapacity;
+
+    // Everything must fit. Make sure the buffer is a little larger than needed,
+    // so there is no ambiguity as to whether mWriteHead == mReadHead means buffer
+    // full or empty
+    size_t available = mCapacity - sizeused - 32;
+    if (available < num) {
+        // NOTE(review): growth copies the old buffer verbatim and does not
+        // re-linearize wrapped data (mWriteHead < mReadHead) — appears to
+        // assume growth happens before wrap-around; verify against callers.
+        int32_t newcapacity = mCapacity + (num - available);
+        char * newbuffer = new char[newcapacity];
+        memcpy(newbuffer, mCutBuffer, mCapacity);
+        delete [] mCutBuffer;
+        mCapacity = newcapacity;
+        mCutBuffer = newbuffer;
+        ALOGV("reallocated buffer at size %d", newcapacity);
+    }
+
+    // copy up to the physical end of the buffer, then wrap to the start
+    size_t copyfirst = (mCapacity - mWriteHead);
+    if (copyfirst > num) copyfirst = num;
+    if (copyfirst) {
+        memcpy(mCutBuffer + mWriteHead, src, copyfirst);
+        num -= copyfirst;
+        src += copyfirst;
+        mWriteHead += copyfirst;
+        CHECK_LE(mWriteHead, mCapacity);
+        if (mWriteHead == mCapacity) mWriteHead = 0;
+        if (num) {
+            // remainder goes to the start of the (now wrapped) buffer
+            memcpy(mCutBuffer, src, num);
+            mWriteHead += num;
+        }
+    }
+}
+
+// Copies up to |num| bytes out of the ring buffer into |dst|, always leaving
+// at least mBackPadding bytes behind (the tail that is to be cut).
+size_t SkipCutBuffer::read(char *dst, size_t num) {
+    int32_t available = (mWriteHead - mReadHead);
+    if (available < 0) available += mCapacity;
+
+    // never hand out the final mBackPadding bytes
+    available -= mBackPadding;
+    if (available <=0) {
+        return 0;
+    }
+    if (available < int32_t(num)) {
+        num = available;
+    }
+
+    // copy up to the physical end of the buffer, then wrap to the start
+    size_t copyfirst = (mCapacity - mReadHead);
+    if (copyfirst > num) copyfirst = num;
+    if (copyfirst) {
+        memcpy(dst, mCutBuffer + mReadHead, copyfirst);
+        num -= copyfirst;
+        dst += copyfirst;
+        mReadHead += copyfirst;
+        CHECK_LE(mReadHead, mCapacity);
+        if (mReadHead == mCapacity) mReadHead = 0;
+        if (num) {
+            memcpy(dst, mCutBuffer, num);
+            mReadHead += num;
+        }
+    }
+    // NOTE(review): returns 'available' rather than the number of bytes
+    // actually copied; the two differ when the caller requested fewer bytes
+    // than were available — verify callers always pass num >= available.
+    return available;
+}
+
+// Number of bytes currently stored in the ring buffer (including the
+// mBackPadding tail that read() withholds).
+size_t SkipCutBuffer::size() {
+    const int32_t used = mWriteHead - mReadHead;
+    return (used < 0) ? size_t(used + mCapacity) : size_t(used);
+}
+
+} // namespace android
diff --git a/media/codec2/sfplugin/SkipCutBuffer.h b/media/codec2/sfplugin/SkipCutBuffer.h
new file mode 100644
index 0000000..0fb5690
--- /dev/null
+++ b/media/codec2/sfplugin/SkipCutBuffer.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SKIP_CUT_BUFFER_H_
+
+#define SKIP_CUT_BUFFER_H_
+
+#include <media/MediaCodecBuffer.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/foundation/ABuffer.h>
+
+namespace android {
+
+/**
+ * utility class to cut the start and end off a stream of data in MediaBuffers
+ *
+ */
+class SkipCutBuffer: public RefBase {
+ public:
+    // 'skip' is the number of frames to skip from the beginning
+    // 'cut' is the number of frames to cut from the end
+    // 'num16BitChannels' is the number of channels, which are assumed to be 16 bit wide each
+    // (parameter name matches the definition; was inconsistently 'num16Channels')
+    SkipCutBuffer(size_t skip, size_t cut, size_t num16BitChannels);
+
+    // Submit one MediaBuffer for skipping and cutting. This may consume all or
+    // some of the data in the buffer, or it may add data to it.
+    // After this, the caller should continue processing the buffer as usual.
+    void submit(MediaBuffer *buffer);
+    void submit(const sp<ABuffer>& buffer);          // same as above, but with an ABuffer
+    void submit(const sp<MediaCodecBuffer>& buffer); // same as above, but with a MediaCodecBuffer
+    void clear();  // drop all buffered data and re-arm the initial skip
+    size_t size(); // how many bytes are currently stored in the buffer
+
+ protected:
+    virtual ~SkipCutBuffer();
+
+ private:
+    void write(const char *src, size_t num);  // append to the internal ring buffer
+    size_t read(char *dst, size_t num);       // consume from the internal ring buffer
+    template <typename T>
+    void submitInternal(const sp<T>& buffer);
+    int32_t mSkip;          // initial skip amount in bytes (restored by clear())
+    int32_t mFrontPadding;  // bytes still to be dropped from the front
+    int32_t mBackPadding;   // bytes withheld from the end by read()
+    int32_t mWriteHead;     // ring buffer write position
+    int32_t mReadHead;      // ring buffer read position
+    int32_t mCapacity;      // ring buffer capacity
+    char* mCutBuffer;       // ring buffer storage (nullptr => passthrough mode)
+    DISALLOW_EVIL_CONSTRUCTORS(SkipCutBuffer);
+};
+
+} // namespace android
+
+#endif  // SKIP_CUT_BUFFER_H_
diff --git a/media/codec2/sfplugin/tests/Android.bp b/media/codec2/sfplugin/tests/Android.bp
new file mode 100644
index 0000000..b08d3d6
--- /dev/null
+++ b/media/codec2/sfplugin/tests/Android.bp
@@ -0,0 +1,52 @@
+cc_test {
+    name: "ccodec_test",
+
+    srcs: [
+        "ReflectedParamUpdater_test.cpp",
+    ],
+
+    // The sfplugin sources were moved from hardware/google/av to
+    // frameworks/av/media/codec2/sfplugin; headers live one directory up.
+    include_dirs: [
+        "frameworks/av/media/codec2/sfplugin",
+    ],
+
+    shared_libs: [
+        "libstagefright_ccodec",
+        "libstagefright_codec2",
+        "libstagefright_foundation",
+        "libutils",
+    ],
+
+    cflags: [
+        "-Werror",
+        "-Wall",
+    ],
+}
+
+cc_test {
+    name: "mc_sanity",
+
+    srcs: [
+        "MediaCodec_sanity_test.cpp",
+    ],
+
+    // The sfplugin sources were moved from hardware/google/av to
+    // frameworks/av/media/codec2/sfplugin; headers live one directory up.
+    include_dirs: [
+        "frameworks/av/media/codec2/sfplugin",
+    ],
+
+    shared_libs: [
+        "libbinder",
+        "libgui",
+        "libmedia",
+        "libmedia_omx",
+        "libstagefright",
+        "libstagefright_ccodec",
+        "libstagefright_codec2",
+        "libstagefright_foundation",
+        "libutils",
+    ],
+
+    cflags: [
+        "-Werror",
+        "-Wall",
+    ],
+}
diff --git a/media/codec2/sfplugin/tests/MediaCodec_sanity_test.cpp b/media/codec2/sfplugin/tests/MediaCodec_sanity_test.cpp
new file mode 100644
index 0000000..ba3687b
--- /dev/null
+++ b/media/codec2/sfplugin/tests/MediaCodec_sanity_test.cpp
@@ -0,0 +1,453 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdlib.h>
+
+#include <algorithm>
+
+#include <binder/ProcessState.h>
+#include <gtest/gtest.h>
+#include <gui/Surface.h>
+#include <media/ICrypto.h>
+#include <media/MediaCodecBuffer.h>
+#include <media/hardware/VideoAPI.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaCodecConstants.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+namespace android {
+
+// Base fixture for MediaCodec sanity tests: starts the binder thread pool and
+// an ALooper for the codec, and pre-allocates empty config/format messages
+// that the individual tests fill in.
+class MediaCodecSanityTest : public ::testing::Test {
+public:
+ MediaCodecSanityTest()
+ : looper(new ALooper),
+ cfg(new AMessage),
+ ifmt(new AMessage),
+ ofmt(new AMessage) {
+ ProcessState::self()->startThreadPool();
+ looper->start();
+ }
+
+ // Release the codec (if a test created one) before stopping its looper.
+ ~MediaCodecSanityTest() {
+ if (codec != nullptr) {
+ codec->release();
+ }
+ looper->stop();
+ }
+
+ sp<ALooper> looper; // looper serving the MediaCodec
+ sp<MediaCodec> codec; // codec under test; created by each test case
+ sp<AMessage> cfg; // configuration message passed to configure()
+ sp<AMessage> ifmt; // receives the codec's input format
+ sp<AMessage> ofmt; // receives the codec's output format
+};
+
+// Fallback input-buffer size (1 MiB) used when the codec reports none.
+const static size_t kLinearBufferSize = 1048576;
+
+// data for a codec input frame
+struct FrameData {
+ const uint8_t *data;
+ size_t size;
+ // Binds a static byte array; N is deduced so the size is captured at
+ // compile time without an explicit length argument.
+ template<size_t N>
+ constexpr FrameData(const uint8_t(&data_)[N]) : data(data_), size(N) { }
+};
+
+// one yellow frame of 240x180 (albeit 4:4:4)
+const uint8_t avcStream_A1[] = { // IDR frame
+ 0x00, 0x00, 0x00, 0x01, 0x67, 0x64, 0x00, 0x0d, 0xac, 0xd9, 0x41, 0x41, 0xfa, 0x10, 0x00, 0x00,
+ 0x03, 0x00, 0x10, 0x00, 0x00, 0x03, 0x03, 0x20, 0xf1, 0x42, 0x99, 0x60,
+
+ 0x00, 0x00, 0x00, 0x01, 0x68, 0xeb, 0xe3, 0xcb, 0x22, 0xc0,
+
+ 0x00, 0x00, 0x01, 0x65, 0x88, 0x84, 0x00, 0x2b, 0xff, 0xfe, 0xd8, 0xe7, 0xf3, 0x2c, 0xa5, 0x60,
+ 0xca, 0xbb, 0xf1, 0x5c, 0x44, 0x7c, 0x9a, 0xa5, 0xc3, 0xab, 0x2f, 0x77, 0x0a, 0x94, 0x0d, 0x19,
+ 0x43, 0x3b, 0x4f, 0x25, 0xea, 0x66, 0x00, 0x01, 0x24, 0xcd, 0x35, 0x5f, 0xc2, 0x34, 0x89, 0xd1,
+ 0xa5, 0x60, 0x09, 0x98, 0x00, 0x01, 0x1b, 0x0e, 0xcb, 0x0d, 0x04, 0x86, 0x94, 0xe2, 0x32, 0x3c,
+ 0xdd, 0x0f,
+};
+
+FrameData avcStream_A[] __unused = { avcStream_A1 };
+
+// AVC stream of 2 yellow frames (240x180)
+const uint8_t avcStream_B1[] = { // IDR frame
+ 0x00, 0x00, 0x00, 0x01, 0x67, 0x64, 0x00, 0x0c, 0xac, 0xd9, 0x41, 0x41, 0xfa, 0x10, 0x00, 0x00,
+ 0x03, 0x00, 0x10, 0x00, 0x00, 0x03, 0x02, 0x80, 0xf1, 0x42, 0x99, 0x60,
+
+ 0x00, 0x00, 0x00, 0x01, 0x68, 0xeb, 0xe3, 0xcb, 0x22, 0xc0,
+
+ 0x00, 0x00, 0x01, 0x65, 0x88, 0x84, 0x00, 0x33, 0xff, 0xfe, 0xdf, 0x32, 0xf8, 0x14, 0xd6, 0x25,
+ 0xd0, 0x74, 0x42, 0x50, 0x84, 0x6f, 0xf4, 0xc2, 0x5c, 0x76, 0x37, 0x17, 0x72, 0xac, 0x52, 0xfc,
+ 0xd6, 0x1f, 0xd2, 0xd0, 0x60, 0xb2, 0x20, 0x00, 0x10, 0x3d, 0x2a, 0xc0, 0xe4, 0x27, 0xcb, 0xce,
+ 0xea, 0x25, 0x00, 0x81, 0x00, 0x00, 0x0f, 0x40, 0xbc, 0x81, 0x15, 0xc1, 0x65, 0x20, 0x80, 0x81,
+ 0x7a, 0x57, 0x51,
+};
+
+const uint8_t avcStream_B2[] = { // P frame
+ 0x00, 0x00, 0x00, 0x01, 0x41, 0x9a, 0x21, 0x6c, 0x42, 0xbf, 0xfe, 0x38, 0x40, 0x00, 0x0d, 0x48,
+};
+
+FrameData avcStream_B[] = { avcStream_B1, avcStream_B2 };
+
+// Parameterized on the "max-input-size" value to configure (-1 means: do not
+// set it and expect the codec's default).
+class MediaCodecInputBufferSizeTest : public MediaCodecSanityTest,
+ public ::testing::WithParamInterface<int32_t> {
+};
+
+// Verifies that an AVC decoder honors a configured "max-input-size" (or
+// reports a default of at least 1 MB when unset), and that the input buffer
+// it hands out is at least that large but not more than 4 KiB larger.
+TEST_P(MediaCodecInputBufferSizeTest, TestAvcDecoder) {
+ codec = MediaCodec::CreateByComponentName(looper, "c2.android.avc.decoder");
+ cfg->setInt32("width", 320);
+ cfg->setInt32("height", 240);
+ cfg->setString("mime", MIMETYPE_VIDEO_AVC);
+
+ const int32_t InputSize = GetParam();
+ if (InputSize >= 0) {
+ cfg->setInt32("max-input-size", InputSize);
+ }
+
+ EXPECT_EQ(codec->configure(cfg, nullptr, nullptr, 0), OK);
+ EXPECT_EQ(codec->getInputFormat(&ifmt), OK);
+ int32_t maxInputSize;
+ ASSERT_TRUE(ifmt->findInt32("max-input-size", &maxInputSize));
+ if (InputSize > 0) {
+ // a configured size must be reported back verbatim
+ EXPECT_EQ(maxInputSize, InputSize);
+ } else {
+ EXPECT_GE(maxInputSize, 1 << 20); // 1 MB
+ }
+ EXPECT_EQ(codec->start(), OK);
+ size_t ix;
+ EXPECT_EQ(codec->dequeueInputBuffer(&ix, 1000000), OK);
+ sp<MediaCodecBuffer> buf;
+ EXPECT_EQ(codec->getInputBuffer(ix, &buf), OK);
+ // buffer must fit maxInputSize but not over-allocate by more than a page
+ EXPECT_GE(buf->size(), (size_t)maxInputSize);
+ EXPECT_LE(buf->size(), (size_t)maxInputSize + 4096u);
+}
+
+// Same check as TestAvcDecoder, but for the VP8 decoder.
+TEST_P(MediaCodecInputBufferSizeTest, TestVideoDecoder) {
+ codec = MediaCodec::CreateByComponentName(looper, "c2.android.vp8.decoder");
+ cfg->setInt32("width", 320);
+ cfg->setInt32("height", 240);
+ cfg->setString("mime", MIMETYPE_VIDEO_VP8);
+
+ const int32_t InputSize = GetParam();
+ if (InputSize >= 0) {
+ cfg->setInt32("max-input-size", InputSize);
+ }
+
+ EXPECT_EQ(codec->configure(cfg, nullptr, nullptr, 0), OK);
+ EXPECT_EQ(codec->getInputFormat(&ifmt), OK);
+ int32_t maxInputSize;
+ ASSERT_TRUE(ifmt->findInt32("max-input-size", &maxInputSize));
+ if (InputSize > 0) {
+ EXPECT_EQ(maxInputSize, InputSize);
+ } else {
+ EXPECT_GE(maxInputSize, 1 << 20); // 1 MB
+ }
+ EXPECT_EQ(codec->start(), OK);
+ size_t ix;
+ EXPECT_EQ(codec->dequeueInputBuffer(&ix, 1000000), OK);
+ sp<MediaCodecBuffer> buf;
+ EXPECT_EQ(codec->getInputBuffer(ix, &buf), OK);
+ EXPECT_GE(buf->size(), (size_t)maxInputSize);
+ EXPECT_LE(buf->size(), (size_t)maxInputSize + 4096u);
+}
+
+// Audio variant: unlike the video decoders, the AAC decoder may not report a
+// "max-input-size" at all; in that case the buffer size is set by the channel
+// and is expected to be around kLinearBufferSize.
+TEST_P(MediaCodecInputBufferSizeTest, TestAudioDecoder) {
+ codec = MediaCodec::CreateByComponentName(looper, "c2.android.aac.decoder");
+ cfg->setInt32("sample-rate", 44100);
+ cfg->setInt32("channel-count", 2);
+ cfg->setString("mime", MIMETYPE_AUDIO_AAC);
+
+ const int32_t InputSize = GetParam();
+ if (InputSize >= 0) {
+ cfg->setInt32("max-input-size", InputSize);
+ }
+
+ EXPECT_EQ(codec->configure(cfg, nullptr, nullptr, 0), OK);
+ EXPECT_EQ(codec->getInputFormat(&ifmt), OK);
+ int32_t maxInputSize;
+ if (InputSize > 0) {
+ ASSERT_TRUE(ifmt->findInt32("max-input-size", &maxInputSize));
+ EXPECT_EQ(maxInputSize, InputSize);
+ } else {
+ if (ifmt->findInt32("max-input-size", &maxInputSize)) {
+ EXPECT_EQ(maxInputSize, 1 << 19); // 512 KB
+ }
+ maxInputSize = kLinearBufferSize; // input size is set by channel
+ }
+
+ EXPECT_EQ(codec->start(), OK);
+ size_t ix;
+ EXPECT_EQ(codec->dequeueInputBuffer(&ix, 1000000), OK);
+ sp<MediaCodecBuffer> buf;
+ EXPECT_EQ(codec->getInputBuffer(ix, &buf), OK);
+ EXPECT_GE(buf->size(), (size_t)maxInputSize);
+ EXPECT_LE(buf->size(), (size_t)maxInputSize + 4096u);
+}
+
+// Run each test with: no explicit size, a tiny size, and a large size.
+INSTANTIATE_TEST_CASE_P(InputSizes, MediaCodecInputBufferSizeTest, ::testing::Values(-1, 1234, 12345678));
+
+// Verifies that "hdr-static-info" supplied at configure time is reflected in
+// the output format both before decoding and on the first decoded buffer.
+TEST_F(MediaCodecSanityTest, TestAvcDecoderHdrStaticInfo) {
+ codec = MediaCodec::CreateByComponentName(looper, "c2.android.avc.decoder");
+ cfg->setInt32("width", 320);
+ cfg->setInt32("height", 240);
+ cfg->setString("mime", MIMETYPE_VIDEO_AVC);
+ // arbitrary but fully-populated Type1 static metadata
+ HDRStaticInfo info = { .mID = HDRStaticInfo::kType1, .sType1 = {
+ .mR = { .x = 35400, .y = 14600 }, .mG = { .x = 8500, .y = 39850 },
+ .mB = { .x = 6550, .y = 2300 }, .mW = { .x = 15635, .y = 16450 },
+ .mMaxDisplayLuminance = 1000, .mMinDisplayLuminance = 1000,
+ .mMaxContentLightLevel = 1000, .mMaxFrameAverageLightLevel = 120 }
+ };
+ cfg->setBuffer("hdr-static-info", ABuffer::CreateAsCopy(&info, sizeof(info)));
+
+ EXPECT_EQ(codec->configure(cfg, nullptr, nullptr, 0), OK);
+ // the info must already appear in the output format before any decode
+ EXPECT_EQ(codec->getOutputFormat(&ofmt), OK);
+ sp<ABuffer> oinfo;
+ ASSERT_TRUE(ofmt->findBuffer("hdr-static-info", &oinfo));
+ ASSERT_EQ(oinfo->size(), sizeof(info));
+ EXPECT_EQ(memcmp(oinfo->data(), &info, sizeof(info)), 0);
+
+ EXPECT_EQ(codec->start(), OK);
+ // assume we can submit all input before dequeuing output
+ size_t frameIx = 0;
+ size_t ix;
+ sp<MediaCodecBuffer> buf;
+ for (const FrameData &frame : avcStream_B) {
+ EXPECT_EQ(codec->dequeueInputBuffer(&ix, 1000000), OK);
+ EXPECT_EQ(codec->getInputBuffer(ix, &buf), OK);
+ ASSERT_GE(buf->capacity(), frame.size);
+ memcpy(buf->base(), frame.data, frame.size);
+ EXPECT_EQ(buf->setRange(0, frame.size), OK);
+ bool eos = ++frameIx == NELEM(avcStream_B);
+ EXPECT_EQ(codec->queueInputBuffer(ix, 0, frame.size, frameIx * 33333,
+ eos ? BUFFER_FLAG_END_OF_STREAM : 0), OK);
+ }
+
+ size_t offset, size;
+ int64_t ts;
+ uint32_t flags;
+ // tolerate at most one INFO_FORMAT_CHANGED and one
+ // INFO_OUTPUT_BUFFERS_CHANGED before the first real output buffer
+ bool mInfoFormatChangedOk = true;
+ bool mInfoBuffersChangedOk = true;
+ while (true) {
+ status_t err = codec->dequeueOutputBuffer(&ix, &offset, &size, &ts, &flags, 1000000);
+ if (err == INFO_FORMAT_CHANGED && mInfoFormatChangedOk) {
+ mInfoFormatChangedOk = false;
+ } else if (err == INFO_OUTPUT_BUFFERS_CHANGED && mInfoBuffersChangedOk) {
+ mInfoBuffersChangedOk = false;
+ } else {
+ ASSERT_EQ(err, OK);
+ break;
+ }
+ }
+ // the decoded buffer's format must still carry the same static info
+ EXPECT_EQ(codec->getOutputBuffer(ix, &buf), OK);
+ EXPECT_EQ(codec->getOutputFormat(ix, &ofmt), OK);
+ ASSERT_TRUE(ofmt->findBuffer("hdr-static-info", &oinfo));
+ ASSERT_EQ(oinfo->size(), sizeof(info));
+ EXPECT_EQ(memcmp(oinfo->data(), &info, sizeof(info)), 0);
+}
+
+// Same configure-time "hdr-static-info" pass-through check for a non-AVC
+// decoder (MPEG-4), but without actually decoding any frames.
+TEST_F(MediaCodecSanityTest, TestVideoDecoderHdrStaticInfo) {
+ codec = MediaCodec::CreateByComponentName(looper, "c2.android.mpeg4.decoder");
+ cfg->setInt32("width", 320);
+ cfg->setInt32("height", 240);
+ cfg->setString("mime", MIMETYPE_VIDEO_MPEG4);
+ HDRStaticInfo info = { .mID = HDRStaticInfo::kType1, .sType1 = {
+ .mR = { .x = 35400, .y = 14600 }, .mG = { .x = 8500, .y = 39850 },
+ .mB = { .x = 6550, .y = 2300 }, .mW = { .x = 15635, .y = 16450 },
+ .mMaxDisplayLuminance = 1000, .mMinDisplayLuminance = 1000,
+ .mMaxContentLightLevel = 1000, .mMaxFrameAverageLightLevel = 120 }
+ };
+ cfg->setBuffer("hdr-static-info", ABuffer::CreateAsCopy(&info, sizeof(info)));
+
+ EXPECT_EQ(codec->configure(cfg, nullptr, nullptr, 0), OK);
+ // the info must be echoed back unchanged in the output format
+ EXPECT_EQ(codec->getOutputFormat(&ofmt), OK);
+ sp<ABuffer> oinfo;
+ ASSERT_TRUE(ofmt->findBuffer("hdr-static-info", &oinfo));
+ ASSERT_EQ(oinfo->size(), sizeof(info));
+ EXPECT_EQ(memcmp(oinfo->data(), &info, sizeof(info)), 0);
+}
+
+// Parameterized on the "color-format" value to configure (-1 means: do not
+// set it and expect the default, COLOR_FormatYUV420Planar).
+class MediaCodecByteBufferTest : public MediaCodecSanityTest,
+ public ::testing::WithParamInterface<int32_t> {
+};
+
+// Decodes one AVC frame in byte-buffer mode and validates the output buffer's
+// MediaImage2 layout (plane offsets/strides/subsampling) for each requested
+// color format, then checks every pixel of the solid-color test frame.
+TEST_P(MediaCodecByteBufferTest, TestVideoDecoder420Planar) {
+ codec = MediaCodec::CreateByComponentName(looper, "c2.android.avc.decoder");
+// codec = MediaCodec::CreateByComponentName(looper, "OMX.google.h264.decoder");
+ cfg->setInt32("width", 320);
+ cfg->setInt32("height", 240);
+ cfg->setString("mime", MIMETYPE_VIDEO_AVC);
+ const int32_t Color = GetParam();
+ if (Color >= 0) {
+ cfg->setInt32("color-format", Color);
+ }
+ // xcolor: the color format we expect the codec to report back
+ int32_t xcolor = Color == -1 ? COLOR_FormatYUV420Planar : Color;
+
+ EXPECT_EQ(codec->configure(cfg, nullptr, nullptr, 0), OK);
+ EXPECT_EQ(codec->getOutputFormat(&ofmt), OK);
+ int32_t ocolor = -1;
+ EXPECT_TRUE(ofmt->findInt32("color-format", &ocolor));
+ EXPECT_EQ(ocolor, xcolor);
+
+ EXPECT_EQ(codec->start(), OK);
+ // assume we can submit all input before dequeuing output
+ size_t frameIx = 0;
+ size_t ix;
+ sp<MediaCodecBuffer> buf;
+ for (const FrameData &frame : avcStream_A) {
+ EXPECT_EQ(codec->dequeueInputBuffer(&ix, 1000000), OK);
+ EXPECT_EQ(codec->getInputBuffer(ix, &buf), OK);
+ ASSERT_GE(buf->capacity(), frame.size);
+ memcpy(buf->base(), frame.data, frame.size);
+ EXPECT_EQ(buf->setRange(0, frame.size), OK);
+ bool eos = ++frameIx == NELEM(avcStream_A);
+ EXPECT_EQ(codec->queueInputBuffer(ix, 0, frame.size, frameIx * 33333,
+ eos ? BUFFER_FLAG_END_OF_STREAM : 0), OK);
+ }
+
+ size_t offset, size;
+ int64_t ts;
+ uint32_t flags;
+ // tolerate at most one INFO_FORMAT_CHANGED and one
+ // INFO_OUTPUT_BUFFERS_CHANGED before the first real output buffer
+ bool mInfoFormatChangedOk = true;
+ bool mInfoBuffersChangedOk = true;
+ while (true) {
+ status_t err = codec->dequeueOutputBuffer(&ix, &offset, &size, &ts, &flags, 1000000);
+ if (err == INFO_FORMAT_CHANGED && mInfoFormatChangedOk) {
+ mInfoFormatChangedOk = false;
+ } else if (err == INFO_OUTPUT_BUFFERS_CHANGED && mInfoBuffersChangedOk) {
+ mInfoBuffersChangedOk = false;
+ } else {
+ ASSERT_EQ(err, OK);
+ break;
+ }
+ }
+ EXPECT_EQ(codec->getOutputBuffer(ix, &buf), OK);
+ EXPECT_EQ(codec->getOutputFormat(ix, &ofmt), OK);
+ ASSERT_TRUE(ofmt->findInt32("color-format", &ocolor));
+ EXPECT_EQ(ocolor, xcolor) << ofmt->debugString(8).c_str() << buf->meta()->debugString(8).c_str();
+ // expect an image-data in both format and meta
+ sp<ABuffer> imgBuf, imgBuf2;
+ ASSERT_TRUE(ofmt->findBuffer("image-data", &imgBuf));
+ ASSERT_TRUE(buf->meta()->findBuffer("image-data", &imgBuf2));
+ EXPECT_EQ(imgBuf->size(), sizeof(MediaImage2));
+ ASSERT_EQ(imgBuf->size(), imgBuf2->size());
+ EXPECT_EQ(0, memcmp(imgBuf->data(), imgBuf2->data(), imgBuf->size()));
+ MediaImage2 *img = (MediaImage2*)imgBuf->data();
+ EXPECT_EQ(img->mType, img->MEDIA_IMAGE_TYPE_YUV);
+ EXPECT_EQ(img->mNumPlanes, 3u);
+ EXPECT_EQ(img->mWidth, 320u);
+ EXPECT_EQ(img->mHeight, 240u);
+ EXPECT_EQ(img->mBitDepth, 8u);
+ EXPECT_EQ(img->mBitDepthAllocated, 8u);
+
+ // read strides from format
+ int32_t stride, vstride;
+ ofmt->findInt32("stride", &stride) || ofmt->findInt32("width", &stride);
+ ofmt->findInt32("slice-height", &vstride) || ofmt->findInt32("height", &vstride);
+
+ // all supported formats are 4:2:0: full-res Y, half-res chroma
+ EXPECT_EQ(img->mPlane[img->Y].mHorizSubsampling, 1u);
+ EXPECT_EQ(img->mPlane[img->Y].mVertSubsampling, 1u);
+ EXPECT_EQ(img->mPlane[img->U].mHorizSubsampling, 2u);
+ EXPECT_EQ(img->mPlane[img->U].mVertSubsampling, 2u);
+ EXPECT_EQ(img->mPlane[img->V].mHorizSubsampling, 2u);
+ EXPECT_EQ(img->mPlane[img->V].mVertSubsampling, 2u);
+
+ switch (xcolor) {
+ // defined formats
+ case COLOR_FormatYUV420Planar:
+ case COLOR_FormatYUV420PackedPlanar:
+ // planar: Y plane followed by contiguous U then V planes
+ EXPECT_EQ(img->mPlane[img->Y].mOffset, 0u);
+ EXPECT_EQ(img->mPlane[img->Y].mColInc, 1);
+ EXPECT_EQ(img->mPlane[img->Y].mRowInc, stride);
+
+ EXPECT_EQ(img->mPlane[img->U].mOffset, (uint32_t)(stride * vstride));
+ EXPECT_EQ(img->mPlane[img->U].mColInc, 1);
+ EXPECT_EQ(img->mPlane[img->U].mRowInc, stride / 2);
+
+ EXPECT_EQ(img->mPlane[img->V].mOffset, (uint32_t)(stride * vstride * 5 / 4));
+ EXPECT_EQ(img->mPlane[img->V].mColInc, 1);
+ EXPECT_EQ(img->mPlane[img->V].mRowInc, stride / 2);
+
+ EXPECT_GE(size, (size_t)(stride * vstride * 5 / 4 + stride / 2 * 119 + 160));
+ EXPECT_LE(size, (size_t)(stride * vstride * 3 / 2));
+ break;
+
+ case COLOR_FormatYUV420SemiPlanar:
+ case COLOR_FormatYUV420PackedSemiPlanar:
+ // semiplanar: Y plane followed by interleaved UV samples
+ EXPECT_EQ(img->mPlane[img->Y].mOffset, 0u);
+ EXPECT_EQ(img->mPlane[img->Y].mColInc, 1);
+ EXPECT_EQ(img->mPlane[img->Y].mRowInc, stride);
+
+ EXPECT_EQ(img->mPlane[img->U].mOffset, (uint32_t)(stride * vstride));
+ EXPECT_EQ(img->mPlane[img->U].mColInc, 2);
+ EXPECT_EQ(img->mPlane[img->U].mRowInc, stride);
+
+ EXPECT_EQ(img->mPlane[img->V].mOffset, (uint32_t)(stride * vstride + 1));
+ EXPECT_EQ(img->mPlane[img->V].mColInc, 2);
+ EXPECT_EQ(img->mPlane[img->V].mRowInc, stride);
+
+ EXPECT_GE(size, (size_t)(stride * vstride + stride * 119 + 320));
+ EXPECT_LE(size, (size_t)(stride * vstride * 3 / 2));
+ break;
+
+ case COLOR_FormatYUV420Flexible:
+ // anything goes, but stride should match Y plane
+ EXPECT_EQ(img->mPlane[img->Y].mRowInc, stride);
+
+ // buffer must at least reach the last sample of each plane
+ EXPECT_GE(size,
+ std::max({
+ img->mPlane[img->Y].mOffset + 239 * img->mPlane[img->Y].mRowInc
+ + 319 * img->mPlane[img->Y].mColInc + 1,
+ img->mPlane[img->U].mOffset + 119 * img->mPlane[img->U].mRowInc
+ + 159 * img->mPlane[img->U].mColInc + 1,
+ img->mPlane[img->V].mOffset + 119 * img->mPlane[img->V].mRowInc
+ + 159 * img->mPlane[img->V].mColInc + 1 }));
+ break;
+
+ default:
+ break;
+ }
+
+ // validate all pixels
+#if 0
+ fprintf(stderr, "MediaImage { F(%ux%u) @%u+%d+%d @%u+%d+%d @%u+%d+%d }\n",
+ img->mWidth, img->mHeight,
+ img->mPlane[0].mOffset, img->mPlane[0].mColInc, img->mPlane[0].mRowInc,
+ img->mPlane[1].mOffset, img->mPlane[1].mColInc, img->mPlane[1].mRowInc,
+ img->mPlane[2].mOffset, img->mPlane[2].mColInc, img->mPlane[2].mRowInc);
+#endif
+ for (ix = 0; ix < 3; ++ix) {
+ // expected Y/U/V sample values for the solid yellow test frame
+ const static uint8_t expected[] = { 210, 16, 146 };
+ for (uint32_t y = 0; y < img->mHeight / img->mPlane[ix].mVertSubsampling ; ++y) {
+ for (uint32_t x = 0; x < img->mWidth / img->mPlane[ix].mHorizSubsampling; ++x) {
+ uint8_t val = buf->data()[img->mPlane[ix].mOffset + img->mPlane[ix].mColInc * x
+ + img->mPlane[ix].mRowInc * y];
+ ASSERT_EQ(val, expected[ix]) << "incorrect value for plane "
+ << ix << " at x=" << x << ", y=" << y;
+ }
+ }
+ }
+}
+
+// Renamed the instantiation prefix from "InputSizes" (copy-pasted from the
+// input-buffer-size suite above) to match what is actually parameterized:
+// the requested output color format (-1 = default).
+INSTANTIATE_TEST_CASE_P(ColorFormats, MediaCodecByteBufferTest, ::testing::Values(
+ -1,
+ COLOR_FormatYUV420Planar,
+ COLOR_FormatYUV420SemiPlanar,
+ COLOR_FormatYUV420PackedPlanar,
+ COLOR_FormatYUV420PackedSemiPlanar,
+ COLOR_FormatYUV420Flexible));
+
+} // namespace android
diff --git a/media/codec2/sfplugin/tests/ReflectedParamUpdater_test.cpp b/media/codec2/sfplugin/tests/ReflectedParamUpdater_test.cpp
new file mode 100644
index 0000000..c7db0e3
--- /dev/null
+++ b/media/codec2/sfplugin/tests/ReflectedParamUpdater_test.cpp
@@ -0,0 +1,352 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define __C2_GENERATE_GLOBAL_VARS__
+
+#include <set>
+
+#include <gtest/gtest.h>
+
+#include <C2ParamDef.h>
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <ReflectedParamUpdater.h>
+
+namespace android {
+
+namespace {
+
+// Test-only parameter indices; kParamIndexLong lives in the vendor range so
+// that vendor-prefixed field names can be exercised.
+enum {
+ kParamIndexTestStart = 0x1000,
+ kParamIndexInt,
+ kParamIndexString,
+ kParamIndexComposite,
+ kParamIndexFlexString,
+
+ kParamIndexLong = C2Param::TYPE_INDEX_VENDOR_START,
+};
+
+typedef C2GlobalParam<C2Info, C2Int32Value, kParamIndexInt> C2IntInfo;
+typedef C2GlobalParam<C2Info, C2Int64Value, kParamIndexLong> C2LongInfo;
+
+// Fixed-capacity (12-char) string parameter.
+struct C2FixedSizeStringStruct {
+ char value[12];
+
+ DEFINE_AND_DESCRIBE_BASE_C2STRUCT(FixedSizeString)
+ C2FIELD(value, "value")
+};
+typedef C2GlobalParam<C2Info, C2FixedSizeStringStruct, kParamIndexString> C2StringInfo;
+
+// Multi-field parameter with both a fixed-size blob and a trailing
+// flexible-size blob.
+struct C2CompositeStruct {
+ int32_t i32;
+ uint64_t u64;
+ char str[12];
+ uint8_t blob[8];
+ uint8_t flexBlob[];
+
+ C2CompositeStruct() = default;
+
+ DEFINE_AND_DESCRIBE_BASE_FLEX_C2STRUCT(Composite, flexBlob)
+ C2FIELD(i32, "i32")
+ C2FIELD(u64, "u64")
+ C2FIELD(str, "str")
+ C2FIELD(blob, "blob")
+ C2FIELD(flexBlob, "flex-blob")
+};
+static_assert(C2CompositeStruct::FLEX_SIZE == 1, "");
+static_assert(_C2FlexHelper<C2CompositeStruct>::FLEX_SIZE == 1, "");
+typedef C2GlobalParam<C2Info, C2CompositeStruct, kParamIndexComposite> C2CompositeInfo;
+
+typedef C2GlobalParam<C2Info, C2StringValue, kParamIndexFlexString> C2FlexStringInfo;
+
+// All parameter types the test reflector knows about.
+#define SUPPORTED_TYPES \
+ C2IntInfo, \
+ C2LongInfo, \
+ C2StringInfo, \
+ C2CompositeInfo, \
+ C2FlexStringInfo
+
+// Recursively matches a core index against each supported type and returns
+// that type's struct descriptor (or nullptr when no type matches).
+template<typename... TYPES> struct describe_impl;
+template<typename T, typename... TYPES> struct describe_impl<T, TYPES...> {
+ static std::unique_ptr<C2StructDescriptor> describe(C2Param::CoreIndex index) {
+ if (index == T::CORE_INDEX) {
+ return std::make_unique<C2StructDescriptor>(T::CORE_INDEX, T::FieldList());
+ } else {
+ return describe_impl<TYPES...>::describe(index);
+ }
+ }
+};
+
+template<> struct describe_impl<> {
+ static std::unique_ptr<C2StructDescriptor> describe(C2Param::CoreIndex) {
+ return nullptr;
+ }
+};
+
+// Maps each supported type to the base field name used in messages.
+template<typename T> const char *GetName() { return nullptr; }
+template<> const char *GetName<C2IntInfo>() { return "int"; }
+template<> const char *GetName<C2LongInfo>() { return "long"; }
+template<> const char *GetName<C2StringInfo>() { return "string"; }
+template<> const char *GetName<C2CompositeInfo>() { return "composite"; }
+template<> const char *GetName<C2FlexStringInfo>() { return "flex-string"; }
+
+// Builds a C2ParamDescriptor for each supported type (recursing first keeps
+// the vector in SUPPORTED_TYPES order reversed; order is not relied upon).
+template<typename... TYPES> struct fill_descriptors_impl;
+template<typename T, typename... TYPES> struct fill_descriptors_impl<T, TYPES...> {
+ static void fill(std::vector<std::shared_ptr<C2ParamDescriptor>> *vec) {
+ fill_descriptors_impl<TYPES...>::fill(vec);
+ vec->push_back(std::make_shared<C2ParamDescriptor>(
+ T::PARAM_TYPE, C2ParamDescriptor::IS_PERSISTENT, GetName<T>()));
+ }
+};
+
+template<> struct fill_descriptors_impl<> {
+ static void fill(std::vector<std::shared_ptr<C2ParamDescriptor>> *) {}
+};
+
+// Unchecked view of a generic param as a concrete param type.
+template<typename T> T *CastParam(const std::unique_ptr<C2Param> &param) {
+ return (T *)param.get();
+}
+
+// Minimal reflector backed by the SUPPORTED_TYPES list above.
+class ParamReflector : public C2ParamReflector {
+public:
+ ParamReflector() = default;
+ ~ParamReflector() override = default;
+
+ std::unique_ptr<C2StructDescriptor> describe(C2Param::CoreIndex paramIndex) const override {
+ return describe_impl<SUPPORTED_TYPES>::describe(paramIndex);
+ }
+};
+
+} // namespace
+
+// Fixture providing the reflector and descriptors for all supported types.
+class ReflectedParamUpdaterTest : public ::testing::Test {
+public:
+ ReflectedParamUpdaterTest() : mReflector(new ParamReflector) {
+ fill_descriptors_impl<SUPPORTED_TYPES>::fill(&mDescriptors);
+ }
+
+ std::shared_ptr<C2ParamReflector> mReflector;
+ std::vector<std::shared_ptr<C2ParamDescriptor>> mDescriptors;
+};
+
+// Round-trips one standard (int) and one vendor (long) scalar value through
+// the updater: field names map to the right param indices, values are written
+// into the params, and getParams() reads the same values back out.
+TEST_F(ReflectedParamUpdaterTest, SingleValueTest) {
+ ReflectedParamUpdater updater;
+
+ ReflectedParamUpdater::Dict msg;
+ msg.emplace("int.value", int32_t(12));
+ msg.emplace("vendor.long.value", int64_t(34));
+
+ updater.addParamDesc(mReflector, mDescriptors);
+
+ std::vector<C2Param::Index> indices;
+ updater.getParamIndicesFromMessage(msg, &indices);
+ // helper: how many of the returned indices match a given param type
+ auto countOf = [&indices](uint32_t type) {
+ return std::count_if(indices.begin(), indices.end(),
+ [type](const auto &value) { return (uint32_t)value == type; });
+ };
+ EXPECT_EQ(1, countOf(C2IntInfo::PARAM_TYPE));
+ EXPECT_EQ(1, countOf(C2LongInfo::PARAM_TYPE));
+ EXPECT_EQ(0, countOf(C2StringInfo::PARAM_TYPE));
+ EXPECT_EQ(0, countOf(C2CompositeInfo::PARAM_TYPE));
+ EXPECT_EQ(0, countOf(C2FlexStringInfo::PARAM_TYPE));
+
+ // fresh params start out zeroed ...
+ std::vector<std::unique_ptr<C2Param>> params;
+ params.emplace_back(new C2IntInfo);
+ params.emplace_back(new C2LongInfo);
+ EXPECT_EQ(0, CastParam<C2IntInfo>(params[0])->value);
+ EXPECT_EQ(0, CastParam<C2LongInfo>(params[1])->value);
+
+ // ... and pick up the message values after the update
+ updater.updateParamsFromMessage(msg, &params);
+ EXPECT_EQ(12, CastParam<C2IntInfo>(params[0])->value);
+ EXPECT_EQ(34, CastParam<C2LongInfo>(params[1])->value);
+
+ // reading the params back must reproduce both values under the same keys
+ C2Value value;
+ int32_t i32 = 0;
+ int64_t i64 = 0;
+ msg = updater.getParams(params);
+ ASSERT_EQ(1u, msg.count("int.value"));
+ EXPECT_EQ(true, msg["int.value"].find(&value));
+ EXPECT_EQ(true, value.get(&i32));
+ EXPECT_EQ(12, i32);
+
+ ASSERT_EQ(1u, msg.count("vendor.long.value"));
+ EXPECT_EQ(true, msg["vendor.long.value"].find(&value));
+ EXPECT_EQ(true, value.get(&i64));
+ EXPECT_EQ(34, i64);
+}
+
+// Exercises string params: a fixed-size string (truncated to capacity) and a
+// flexible string (reallocated to fit, updated in place when it shrinks).
+TEST_F(ReflectedParamUpdaterTest, StringTest) {
+ ReflectedParamUpdater updater;
+
+ ReflectedParamUpdater::Dict msg;
+ msg.emplace("string.value", AString("56"));
+ msg.emplace("flex-string.value", AString("Some string"));
+ updater.addParamDesc(mReflector, mDescriptors);
+
+ // only the two string param types should be requested for these fields
+ std::vector<C2Param::Index> indices;
+ updater.getParamIndicesFromMessage(msg, &indices);
+ EXPECT_EQ(0, std::count_if(indices.begin(), indices.end(),
+ [](const auto &value) { return (uint32_t)value == C2IntInfo::PARAM_TYPE; }));
+ EXPECT_EQ(0, std::count_if(indices.begin(), indices.end(),
+ [](const auto &value) { return (uint32_t)value == C2LongInfo::PARAM_TYPE; }));
+ EXPECT_EQ(1, std::count_if(indices.begin(), indices.end(),
+ [](const auto &value) { return (uint32_t)value == C2StringInfo::PARAM_TYPE; }));
+ EXPECT_EQ(0, std::count_if(indices.begin(), indices.end(),
+ [](const auto &value) { return (uint32_t)value == C2CompositeInfo::PARAM_TYPE; }));
+ EXPECT_EQ(1, std::count_if(indices.begin(), indices.end(),
+ [](const auto &value) { return (uint32_t)value == C2FlexStringInfo::PARAM_TYPE; }));
+
+ // params[0]: fixed-size string; params[1]: flex string of capacity 0
+ std::vector<std::unique_ptr<C2Param>> params;
+ params.emplace_back(new C2StringInfo);
+ EXPECT_EQ(0, CastParam<C2StringInfo>(params[0])->value[0]);
+ params.emplace_back(C2FlexStringInfo::AllocUnique(0));
+ EXPECT_EQ(0u, CastParam<C2FlexStringInfo>(params[1])->flexCount());
+ char *flexStringData = &CastParam<C2FlexStringInfo>(params[1])->m.value[0];
+
+ updater.updateParamsFromMessage(msg, &params);
+ EXPECT_STREQ("56", CastParam<C2StringInfo>(params[0])->value);
+ // BUGFIX: the flex string is params[1]; params[0] is the fixed-size
+ // C2StringInfo, so the original params[0] here read the wrong param
+ // through a mismatched cast.
+ EXPECT_EQ(12u, CastParam<C2FlexStringInfo>(params[1])->flexCount());
+ EXPECT_STREQ("Some string", CastParam<C2FlexStringInfo>(params[1])->m.value);
+ // growing from capacity 0 must have reallocated the flex storage
+ EXPECT_NE(flexStringData, &CastParam<C2FlexStringInfo>(params[1])->m.value[0]);
+ flexStringData = &CastParam<C2FlexStringInfo>(params[1])->m.value[0];
+
+ // verify truncation and in-place update
+ msg["string.value"] = ReflectedParamUpdater::Value(AString("1234567890ABCDE"));
+ msg["flex-string.value"] = ReflectedParamUpdater::Value(AString("abc"));
+ updater.updateParamsFromMessage(msg, &params);
+ EXPECT_STREQ("1234567890A", CastParam<C2StringInfo>(params[0])->value);
+ EXPECT_EQ(4u, CastParam<C2FlexStringInfo>(params[1])->flexCount());
+ EXPECT_STREQ("abc", CastParam<C2FlexStringInfo>(params[1])->m.value);
+ EXPECT_EQ(flexStringData, &CastParam<C2FlexStringInfo>(params[1])->m.value[0]);
+
+ // reading back returns the truncated/shrunken values
+ AString strValue;
+ msg = updater.getParams(params);
+ ASSERT_EQ(1u, msg.count("string.value"));
+ EXPECT_EQ(true, msg["string.value"].find(&strValue));
+ EXPECT_STREQ("1234567890A", strValue.c_str());
+
+ ASSERT_EQ(1u, msg.count("flex-string.value"));
+ EXPECT_EQ(true, msg["flex-string.value"].find(&strValue));
+ EXPECT_STREQ("abc", strValue.c_str());
+}
+
+// Exercises a multi-field param: scalar fields, a fixed-size blob (zero-
+// extended or trimmed to capacity) and a trailing flexible blob (reallocated
+// to grow, updated in place when it shrinks).
+TEST_F(ReflectedParamUpdaterTest, CompositeTest) {
+ ReflectedParamUpdater updater;
+
+ ReflectedParamUpdater::Dict msg;
+ msg.emplace("composite.i32", int32_t(78));
+ msg.emplace("composite.u64", int64_t(910));
+ msg.emplace("composite.str", AString("1112"));
+ msg.emplace("composite.blob", ABuffer::CreateAsCopy("buffer08", 8));
+ msg.emplace("composite.flex-blob", ABuffer::CreateAsCopy("flex-buffer-14", 14));
+
+ updater.addParamDesc(mReflector, mDescriptors);
+
+ // only the composite param's index should be reported for these fields
+ std::vector<C2Param::Index> indices;
+ updater.getParamIndicesFromMessage(msg, &indices);
+ EXPECT_EQ(0, std::count_if(indices.begin(), indices.end(),
+ [](const auto &value) { return (uint32_t)value == C2IntInfo::PARAM_TYPE; }));
+ EXPECT_EQ(0, std::count_if(indices.begin(), indices.end(),
+ [](const auto &value) { return (uint32_t)value == C2LongInfo::PARAM_TYPE; }));
+ EXPECT_EQ(0, std::count_if(indices.begin(), indices.end(),
+ [](const auto &value) { return (uint32_t)value == C2StringInfo::PARAM_TYPE; }));
+ EXPECT_EQ(1, std::count_if(indices.begin(), indices.end(),
+ [](const auto &value) { return (uint32_t)value == C2CompositeInfo::PARAM_TYPE; }));
+
+ // a fresh param (flex capacity 0) starts out zeroed
+ std::vector<std::unique_ptr<C2Param>> params;
+ params.emplace_back(C2CompositeInfo::AllocUnique(0));
+ EXPECT_EQ(0, CastParam<C2CompositeInfo>(params[0])->m.i32);
+ EXPECT_EQ(0u, CastParam<C2CompositeInfo>(params[0])->m.u64);
+ EXPECT_EQ(0, CastParam<C2CompositeInfo>(params[0])->m.str[0]);
+ EXPECT_EQ(0, memcmp("\0\0\0\0\0\0\0\0", CastParam<C2CompositeInfo>(params[0])->m.blob, 8));
+ EXPECT_EQ(0u, CastParam<C2CompositeInfo>(params[0])->flexCount());
+ uint8_t *flexBlobData = &CastParam<C2CompositeInfo>(params[0])->m.flexBlob[0];
+
+ updater.updateParamsFromMessage(msg, &params);
+ EXPECT_EQ(78, CastParam<C2CompositeInfo>(params[0])->m.i32);
+ EXPECT_EQ(910u, CastParam<C2CompositeInfo>(params[0])->m.u64);
+ EXPECT_STREQ("1112", CastParam<C2CompositeInfo>(params[0])->m.str);
+ EXPECT_EQ(0, memcmp("buffer08", CastParam<C2CompositeInfo>(params[0])->m.blob, 8));
+ // (removed a leftover debug hexdump/printf of the blob here — the memcmp
+ // above already verifies its contents, and tests should not write to stdout)
+ ASSERT_EQ(14u, CastParam<C2CompositeInfo>(params[0])->flexCount());
+ EXPECT_EQ(0, memcmp("flex-buffer-14", CastParam<C2CompositeInfo>(params[0])->m.flexBlob, 14));
+ // growing from capacity 0 must have reallocated the flex storage
+ EXPECT_NE(flexBlobData, &CastParam<C2CompositeInfo>(params[0])->m.flexBlob[0]);
+ flexBlobData = &CastParam<C2CompositeInfo>(params[0])->m.flexBlob[0];
+
+ // test setting and zero extending shorter blob than allowed
+ msg.clear();
+ msg.emplace("composite.blob", ABuffer::CreateAsCopy("buf05", 5));
+ updater.updateParamsFromMessage(msg, &params);
+ EXPECT_EQ(0, memcmp("buf05\0\0\0", CastParam<C2CompositeInfo>(params[0])->m.blob, 8));
+ ASSERT_EQ(14u, CastParam<C2CompositeInfo>(params[0])->flexCount());
+ EXPECT_EQ(0, memcmp("flex-buffer-14", CastParam<C2CompositeInfo>(params[0])->m.flexBlob, 14));
+ EXPECT_EQ(flexBlobData, &CastParam<C2CompositeInfo>(params[0])->m.flexBlob[0]);
+
+ // test setting and trimming larger blob than allowed
+ msg.clear();
+ msg.emplace("composite.blob", ABuffer::CreateAsCopy("ReallyLongBuffer", 16));
+ updater.updateParamsFromMessage(msg, &params);
+ EXPECT_EQ(0, memcmp("ReallyLo", CastParam<C2CompositeInfo>(params[0])->m.blob, 8));
+ ASSERT_EQ(14u, CastParam<C2CompositeInfo>(params[0])->flexCount());
+ EXPECT_EQ(0, memcmp("flex-buffer-14", CastParam<C2CompositeInfo>(params[0])->m.flexBlob, 14));
+ EXPECT_EQ(flexBlobData, &CastParam<C2CompositeInfo>(params[0])->m.flexBlob[0]);
+
+ // test trimming flex blob in-place
+ msg.clear();
+ msg.emplace("composite.flex-blob", ABuffer::CreateAsCopy("buf05", 5));
+ updater.updateParamsFromMessage(msg, &params);
+ ASSERT_EQ(5u, CastParam<C2CompositeInfo>(params[0])->flexCount());
+ EXPECT_EQ(0, memcmp("buf05", CastParam<C2CompositeInfo>(params[0])->m.flexBlob, 5));
+ EXPECT_EQ(flexBlobData, &CastParam<C2CompositeInfo>(params[0])->m.flexBlob[0]);
+}
+
+// Verifies a partial update: only the fields present in the message change;
+// the untouched u64 field keeps its prior (zero) value.
+TEST_F(ReflectedParamUpdaterTest, CompositePartialTest) {
+ ReflectedParamUpdater updater;
+
+ ReflectedParamUpdater::Dict msg;
+ msg.emplace("composite.i32", C2Value(1314));
+ msg.emplace("composite.str", AString("1516"));
+
+ updater.addParamDesc(mReflector, mDescriptors);
+
+ // only the composite param's index should be reported for these fields
+ std::vector<C2Param::Index> indices;
+ updater.getParamIndicesFromMessage(msg, &indices);
+ EXPECT_EQ(0, std::count_if(indices.begin(), indices.end(),
+ [](const auto &value) { return (uint32_t)value == C2IntInfo::PARAM_TYPE; }));
+ EXPECT_EQ(0, std::count_if(indices.begin(), indices.end(),
+ [](const auto &value) { return (uint32_t)value == C2LongInfo::PARAM_TYPE; }));
+ EXPECT_EQ(0, std::count_if(indices.begin(), indices.end(),
+ [](const auto &value) { return (uint32_t)value == C2StringInfo::PARAM_TYPE; }));
+ EXPECT_EQ(1, std::count_if(indices.begin(), indices.end(),
+ [](const auto &value) { return (uint32_t)value == C2CompositeInfo::PARAM_TYPE; }));
+
+ std::vector<std::unique_ptr<C2Param>> params;
+ params.emplace_back(C2CompositeInfo::AllocUnique(12u));
+ EXPECT_EQ(0, CastParam<C2CompositeInfo>(params[0])->m.i32);
+ EXPECT_EQ(0u, CastParam<C2CompositeInfo>(params[0])->m.u64);
+ EXPECT_EQ(0, CastParam<C2CompositeInfo>(params[0])->m.str[0]);
+
+ updater.updateParamsFromMessage(msg, &params);
+ EXPECT_EQ(1314, CastParam<C2CompositeInfo>(params[0])->m.i32);
+ EXPECT_EQ(0u, CastParam<C2CompositeInfo>(params[0])->m.u64); // untouched
+ EXPECT_STREQ("1516", CastParam<C2CompositeInfo>(params[0])->m.str);
+}
+
+} // namespace android
diff --git a/media/codec2/sfplugin/utils/Android.bp b/media/codec2/sfplugin/utils/Android.bp
new file mode 100644
index 0000000..3dc6060
--- /dev/null
+++ b/media/codec2/sfplugin/utils/Android.bp
@@ -0,0 +1,39 @@
+// Buffer- and format-conversion helpers shared by the Codec2 stagefright
+// plugin; vendor_available so vendor codec implementations can link it too.
+cc_library_shared {
+ name: "libstagefright_ccodec_utils",
+ vendor_available: true,
+
+ srcs: [
+ "Codec2BufferUtils.cpp",
+ "Codec2Mapper.cpp",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+
+ // headers live next to the sources
+ export_include_dirs: [
+ ".",
+ ],
+
+ shared_libs: [
+ "libbase",
+ "libcutils",
+ "liblog",
+ "libstagefright_codec2",
+ "libstagefright_codec2_vndk",
+ "libstagefright_foundation",
+ "libutils",
+ ],
+
+ sanitize: {
+ cfi: true,
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ diag: {
+ cfi: true,
+ },
+ },
+}
diff --git a/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp b/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
new file mode 100644
index 0000000..b7519da
--- /dev/null
+++ b/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
@@ -0,0 +1,400 @@
+/*
+ * Copyright 2018, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "Codec2BufferUtils"
+#include <utils/Log.h>
+
+#include <list>
+#include <mutex>
+
+#include <media/hardware/HardwareAPI.h>
+#include <media/stagefright/foundation/AUtils.h>
+
+#include <C2Debug.h>
+
+#include "Codec2BufferUtils.h"
+
+namespace android {
+
+namespace {
+
+/**
+ * A flippable, optimizable memcpy. Constructs such as (from ? src : dst) do not work as the results are
+ * always const.
+ */
+// Primary template (ToA == true): data flows from b into a.
+template<bool ToA, size_t S>
+struct MemCopier {
+ template<typename A, typename B>
+ inline static void copy(A *a, const B *b, size_t size) {
+ // __builtin_memcpy lets the compiler fold small fixed-size copies inline.
+ __builtin_memcpy(a, b, size);
+ }
+};
+
+// Specialization (ToA == false): flips the arguments so data flows a -> b.
+template<size_t S>
+struct MemCopier<false, S> {
+ template<typename A, typename B>
+ inline static void copy(const A *a, B *b, size_t size) {
+ MemCopier<true, S>::copy(b, a, size);
+ }
+};
+
+/**
+ * Copies between a MediaImage and a graphic view.
+ *
+ * \param ToMediaImage whether to copy to (or from) the MediaImage
+ * \param view graphic view (could be ConstGraphicView or GraphicView depending on direction)
+ * \param img MediaImage data
+ * \param imgBase base of MediaImage (could be const uint8_t* or uint8_t* depending on direction)
+ */
+template<bool ToMediaImage, typename View, typename ImagePixel>
+static status_t _ImageCopy(View &view, const MediaImage2 *img, ImagePixel *imgBase) {
+ // TODO: more efficient copying --- e.g. one row at a time, copying
+ // interleaved planes together, etc.
+ const C2PlanarLayout &layout = view.layout();
+ const size_t bpp = divUp(img->mBitDepthAllocated, 8u);
+ // both sides must agree on the unsubsampled dimensions
+ if (view.width() != img->mWidth
+ || view.height() != img->mHeight) {
+ return BAD_VALUE;
+ }
+ for (uint32_t i = 0; i < layout.numPlanes; ++i) {
+ typename std::conditional<ToMediaImage, uint8_t, const uint8_t>::type *imgRow =
+ imgBase + img->mPlane[i].mOffset;
+ // FIX: this initializer used to read "viewRow = viewRow = view.data()[i]",
+ // a duplicated assignment that self-assigns the still-uninitialized
+ // pointer inside its own initializer.
+ typename std::conditional<ToMediaImage, const uint8_t, uint8_t>::type *viewRow =
+ view.data()[i];
+ const C2PlaneInfo &plane = layout.planes[i];
+ // reject layouts MediaImage2 cannot represent
+ if (plane.colSampling != img->mPlane[i].mHorizSubsampling
+ || plane.rowSampling != img->mPlane[i].mVertSubsampling
+ || plane.allocatedDepth != img->mBitDepthAllocated
+ || plane.allocatedDepth < plane.bitDepth
+ // MediaImage only supports MSB values
+ || plane.rightShift != plane.allocatedDepth - plane.bitDepth
+ || (bpp > 1 && plane.endianness != plane.NATIVE)) {
+ return BAD_VALUE;
+ }
+
+ uint32_t planeW = img->mWidth / plane.colSampling;
+ uint32_t planeH = img->mHeight / plane.rowSampling;
+ // copy sample-by-sample, honoring both sides' column/row increments
+ for (uint32_t row = 0; row < planeH; ++row) {
+ decltype(imgRow) imgPtr = imgRow;
+ decltype(viewRow) viewPtr = viewRow;
+ for (uint32_t col = 0; col < planeW; ++col) {
+ MemCopier<ToMediaImage, 0>::copy(imgPtr, viewPtr, bpp);
+ imgPtr += img->mPlane[i].mColInc;
+ viewPtr += plane.colInc;
+ }
+ imgRow += img->mPlane[i].mRowInc;
+ viewRow += plane.rowInc;
+ }
+ }
+ return OK;
+}
+
+} // namespace
+
+// Copies the graphic view's pixels into a MediaImage2 client buffer.
+status_t ImageCopy(uint8_t *imgBase, const MediaImage2 *img, const C2GraphicView &view) {
+ return _ImageCopy<true>(view, img, imgBase);
+}
+
+// Copies a MediaImage2 client buffer's pixels into the graphic view.
+status_t ImageCopy(C2GraphicView &view, const uint8_t *imgBase, const MediaImage2 *img) {
+ return _ImageCopy<false>(view, img, imgBase);
+}
+
+// Returns true iff the view is a 3-plane, 8-bit YUV layout with full-res Y
+// and 2x2-subsampled Cb/Cr planes (i.e. a YUV 420 888 layout).
+bool IsYUV420(const C2GraphicView &view) {
+ const C2PlanarLayout &layout = view.layout();
+ return (layout.numPlanes == 3
+ && layout.type == C2PlanarLayout::TYPE_YUV
+ // Y: 8-bit samples at full resolution
+ && layout.planes[layout.PLANE_Y].channel == C2PlaneInfo::CHANNEL_Y
+ && layout.planes[layout.PLANE_Y].allocatedDepth == 8
+ && layout.planes[layout.PLANE_Y].bitDepth == 8
+ && layout.planes[layout.PLANE_Y].rightShift == 0
+ && layout.planes[layout.PLANE_Y].colSampling == 1
+ && layout.planes[layout.PLANE_Y].rowSampling == 1
+ // Cb: 8-bit samples subsampled 2x2
+ && layout.planes[layout.PLANE_U].channel == C2PlaneInfo::CHANNEL_CB
+ && layout.planes[layout.PLANE_U].allocatedDepth == 8
+ && layout.planes[layout.PLANE_U].bitDepth == 8
+ && layout.planes[layout.PLANE_U].rightShift == 0
+ && layout.planes[layout.PLANE_U].colSampling == 2
+ && layout.planes[layout.PLANE_U].rowSampling == 2
+ // Cr: 8-bit samples subsampled 2x2
+ && layout.planes[layout.PLANE_V].channel == C2PlaneInfo::CHANNEL_CR
+ && layout.planes[layout.PLANE_V].allocatedDepth == 8
+ && layout.planes[layout.PLANE_V].bitDepth == 8
+ && layout.planes[layout.PLANE_V].rightShift == 0
+ && layout.planes[layout.PLANE_V].colSampling == 2
+ && layout.planes[layout.PLANE_V].rowSampling == 2);
+}
+
+// Builds an I420 (fully planar) descriptor: a full-resolution Y plane
+// followed by quarter-resolution U then V planes at half the Y stride.
+MediaImage2 CreateYUV420PlanarMediaImage2(
+ uint32_t width, uint32_t height, uint32_t stride, uint32_t vstride) {
+ return MediaImage2 {
+ .mType = MediaImage2::MEDIA_IMAGE_TYPE_YUV,
+ .mNumPlanes = 3,
+ .mWidth = width,
+ .mHeight = height,
+ .mBitDepth = 8,
+ .mBitDepthAllocated = 8,
+ .mPlane = {
+ // Y plane at the start of the buffer
+ {
+ .mOffset = 0,
+ .mColInc = 1,
+ .mRowInc = (int32_t)stride,
+ .mHorizSubsampling = 1,
+ .mVertSubsampling = 1,
+ },
+ // U plane directly after Y
+ {
+ .mOffset = stride * vstride,
+ .mColInc = 1,
+ .mRowInc = (int32_t)stride / 2,
+ .mHorizSubsampling = 2,
+ .mVertSubsampling = 2,
+ },
+ // V plane after U (U occupies stride * vstride / 4 bytes)
+ {
+ .mOffset = stride * vstride * 5 / 4,
+ .mColInc = 1,
+ .mRowInc = (int32_t)stride / 2,
+ .mHorizSubsampling = 2,
+ .mVertSubsampling = 2,
+ }
+ },
+ };
+}
+
+// Builds an NV12 (semiplanar) descriptor: a full-resolution Y plane followed
+// by interleaved U/V samples, expressed as two planes with column stride 2
+// whose offsets differ by one byte.
+MediaImage2 CreateYUV420SemiPlanarMediaImage2(
+ uint32_t width, uint32_t height, uint32_t stride, uint32_t vstride) {
+ return MediaImage2 {
+ .mType = MediaImage2::MEDIA_IMAGE_TYPE_YUV,
+ .mNumPlanes = 3,
+ .mWidth = width,
+ .mHeight = height,
+ .mBitDepth = 8,
+ .mBitDepthAllocated = 8,
+ .mPlane = {
+ // Y plane at the start of the buffer
+ {
+ .mOffset = 0,
+ .mColInc = 1,
+ .mRowInc = (int32_t)stride,
+ .mHorizSubsampling = 1,
+ .mVertSubsampling = 1,
+ },
+ // U samples: even bytes of the interleaved chroma plane
+ {
+ .mOffset = stride * vstride,
+ .mColInc = 2,
+ .mRowInc = (int32_t)stride,
+ .mHorizSubsampling = 2,
+ .mVertSubsampling = 2,
+ },
+ // V samples: odd bytes of the interleaved chroma plane
+ {
+ .mOffset = stride * vstride + 1,
+ .mColInc = 2,
+ .mRowInc = (int32_t)stride,
+ .mHorizSubsampling = 2,
+ .mVertSubsampling = 2,
+ }
+ },
+ };
+}
+
+// Converts an RGB graphic view into planar YUV 420 (I420) in the destination
+// buffer, using the conversion matrix noted inline (ITU-R BT.601 per the
+// in-code comment). Requires even width/height; returns NO_MEMORY if the
+// destination buffer cannot hold dstStride * dstVStride * 3 / 2 bytes.
+status_t ConvertRGBToPlanarYUV(
+ uint8_t *dstY, size_t dstStride, size_t dstVStride, size_t bufferSize,
+ const C2GraphicView &src) {
+ CHECK(dstY != nullptr);
+ CHECK((src.width() & 1) == 0);
+ CHECK((src.height() & 1) == 0);
+
+ if (dstStride * dstVStride * 3 / 2 > bufferSize) {
+ ALOGD("conversion buffer is too small for converting from RGB to YUV");
+ return NO_MEMORY;
+ }
+
+ // chroma planes follow the luma plane at half resolution
+ uint8_t *dstU = dstY + dstStride * dstVStride;
+ uint8_t *dstV = dstU + (dstStride >> 1) * (dstVStride >> 1);
+
+ const C2PlanarLayout &layout = src.layout();
+ const uint8_t *pRed = src.data()[C2PlanarLayout::PLANE_R];
+ const uint8_t *pGreen = src.data()[C2PlanarLayout::PLANE_G];
+ const uint8_t *pBlue = src.data()[C2PlanarLayout::PLANE_B];
+
+// CLIP3(lo, hi, v) clamps v into [lo, hi].
+#define CLIP3(x,y,z) (((z) < (x)) ? (x) : (((z) > (y)) ? (y) : (z)))
+ for (size_t y = 0; y < src.height(); ++y) {
+ for (size_t x = 0; x < src.width(); ++x) {
+ uint8_t red = *pRed;
+ uint8_t green = *pGreen;
+ uint8_t blue = *pBlue;
+
+ // using ITU-R BT.601 conversion matrix
+ // FIX: the computed value used to be passed as CLIP3's upper bound
+ // (CLIP3(0, value, 255)) instead of as the value to clamp, which
+ // skipped the lower clamp entirely. For these coefficients the value
+ // stays in range, so observable behavior is unchanged.
+ unsigned luma =
+ CLIP3(0, 255, (((red * 66 + green * 129 + blue * 25) >> 8) + 16));
+
+ dstY[x] = luma;
+
+ // chroma is 2x2 subsampled — emit U/V once per 2x2 pixel block
+ if ((x & 1) == 0 && (y & 1) == 0) {
+ unsigned U =
+ CLIP3(0, 255, (((-red * 38 - green * 74 + blue * 112) >> 8) + 128));
+
+ unsigned V =
+ CLIP3(0, 255, (((red * 112 - green * 94 - blue * 18) >> 8) + 128));
+
+ dstU[x >> 1] = U;
+ dstV[x >> 1] = V;
+ }
+ pRed += layout.planes[C2PlanarLayout::PLANE_R].colInc;
+ pGreen += layout.planes[C2PlanarLayout::PLANE_G].colInc;
+ pBlue += layout.planes[C2PlanarLayout::PLANE_B].colInc;
+ }
+
+ // chroma rows advance every other luma row
+ if ((y & 1) == 0) {
+ dstU += dstStride >> 1;
+ dstV += dstStride >> 1;
+ }
+
+ // rewind the source pointers to the row start, then step one row down
+ pRed -= layout.planes[C2PlanarLayout::PLANE_R].colInc * src.width();
+ pGreen -= layout.planes[C2PlanarLayout::PLANE_G].colInc * src.width();
+ pBlue -= layout.planes[C2PlanarLayout::PLANE_B].colInc * src.width();
+ pRed += layout.planes[C2PlanarLayout::PLANE_R].rowInc;
+ pGreen += layout.planes[C2PlanarLayout::PLANE_G].rowInc;
+ pBlue += layout.planes[C2PlanarLayout::PLANE_B].rowInc;
+
+ dstY += dstStride;
+ }
+#undef CLIP3
+ return OK;
+}
+
+namespace {
+
+/**
+ * A block of raw allocated memory.
+ */
+struct MemoryBlockPoolBlock {
+ // NOTE: non-nothrow operator new[] throws on failure rather than returning
+ // null, so the mData check is defensive only.
+ MemoryBlockPoolBlock(size_t size)
+ : mData(new uint8_t[size]), mSize(mData ? size : 0) { }
+
+ ~MemoryBlockPoolBlock() {
+ delete[] mData;
+ }
+
+ const uint8_t *data() const {
+ return mData;
+ }
+
+ size_t size() const {
+ return mSize;
+ }
+
+ // non-copyable: the destructor frees mData, a copy would double-free
+ C2_DO_NOT_COPY(MemoryBlockPoolBlock);
+
+private:
+ uint8_t *mData;
+ size_t mSize;
+};
+
+/**
+ * A simple raw memory block pool implementation.
+ */
+struct MemoryBlockPoolImpl {
+ // Note: only free blocks of the most recently fetched size (mCurrentSize)
+ // are cached; blocks of any other size are discarded on release/fetch.
+
+ // Returns a block to the free list, or frees it if the pool has since
+ // moved to a different block size.
+ void release(std::list<MemoryBlockPoolBlock>::const_iterator block) {
+ std::lock_guard<std::mutex> lock(mMutex);
+ // return block to free blocks if it is the current size; otherwise, discard
+ if (block->size() == mCurrentSize) {
+ mFreeBlocks.splice(mFreeBlocks.begin(), mBlocksInUse, block);
+ } else {
+ mBlocksInUse.erase(block);
+ }
+ }
+
+ // Fetches a block of |size| bytes, reusing a cached free block when one
+ // matches; the returned iterator stays valid across splices.
+ std::list<MemoryBlockPoolBlock>::const_iterator fetch(size_t size) {
+ std::lock_guard<std::mutex> lock(mMutex);
+ // drop cached free blocks that no longer match the requested size
+ mFreeBlocks.remove_if([size](const MemoryBlockPoolBlock &block) -> bool {
+ return block.size() != size;
+ });
+ mCurrentSize = size;
+ if (mFreeBlocks.empty()) {
+ mBlocksInUse.emplace_front(size);
+ } else {
+ // move a cached block onto the in-use list without reallocating
+ mBlocksInUse.splice(mBlocksInUse.begin(), mFreeBlocks, mFreeBlocks.begin());
+ }
+ return mBlocksInUse.begin();
+ }
+
+ MemoryBlockPoolImpl() = default;
+
+ C2_DO_NOT_COPY(MemoryBlockPoolImpl);
+
+private:
+ std::mutex mMutex;
+ std::list<MemoryBlockPoolBlock> mFreeBlocks;
+ std::list<MemoryBlockPoolBlock> mBlocksInUse;
+ // FIX: default-initialize — this member was previously uninitialized and
+ // read by release() (harmless only because fetch() always runs first).
+ size_t mCurrentSize = 0;
+};
+
+} // namespace
+
+// Pool Impl is just the shared implementation with a nameable type.
+struct MemoryBlockPool::Impl : MemoryBlockPoolImpl {
+};
+
+// Shared state behind a MemoryBlock: an iterator to the pooled storage plus
+// a strong reference to the pool so the storage outlives the pool handle.
+struct MemoryBlock::Impl {
+ Impl(std::list<MemoryBlockPoolBlock>::const_iterator block,
+ std::shared_ptr<MemoryBlockPoolImpl> pool)
+ : mBlock(block), mPool(pool) {
+ }
+
+ ~Impl() {
+ // hand the storage back to (or let it be freed by) the pool
+ mPool->release(mBlock);
+ }
+
+ const uint8_t *data() const {
+ return mBlock->data();
+ }
+
+ size_t size() const {
+ return mBlock->size();
+ }
+
+private:
+ std::list<MemoryBlockPoolBlock>::const_iterator mBlock;
+ std::shared_ptr<MemoryBlockPoolImpl> mPool;
+};
+
+// Fetches pooled storage and wraps it in a MemoryBlock that returns the
+// storage to this pool when its last copy is destroyed.
+MemoryBlock MemoryBlockPool::fetch(size_t size) {
+ std::list<MemoryBlockPoolBlock>::const_iterator poolBlock = mImpl->fetch(size);
+ return MemoryBlock(std::make_shared<MemoryBlock::Impl>(
+ poolBlock, std::static_pointer_cast<MemoryBlockPoolImpl>(mImpl)));
+}
+
+MemoryBlockPool::MemoryBlockPool()
+ : mImpl(std::make_shared<MemoryBlockPool::Impl>()) {
+}
+
+MemoryBlock::MemoryBlock(std::shared_ptr<MemoryBlock::Impl> impl)
+ : mImpl(impl) {
+}
+
+// default block: no impl, so data() is null and size() is 0
+MemoryBlock::MemoryBlock() = default;
+
+MemoryBlock::~MemoryBlock() = default;
+
+const uint8_t* MemoryBlock::data() const {
+ return mImpl ? mImpl->data() : nullptr;
+}
+
+size_t MemoryBlock::size() const {
+ return mImpl ? mImpl->size() : 0;
+}
+
+// "Unmanaged" allocation: a throwaway single-use pool backs the block; the
+// storage is freed when the block's last copy goes away.
+MemoryBlock MemoryBlock::Allocate(size_t size) {
+ return MemoryBlockPool().fetch(size);
+}
+
+} // namespace android
diff --git a/media/codec2/sfplugin/utils/Codec2BufferUtils.h b/media/codec2/sfplugin/utils/Codec2BufferUtils.h
new file mode 100644
index 0000000..eaf6776
--- /dev/null
+++ b/media/codec2/sfplugin/utils/Codec2BufferUtils.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2018, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CODEC2_BUFFER_UTILS_H_
+#define CODEC2_BUFFER_UTILS_H_
+
+#include <C2Buffer.h>
+#include <C2ParamDef.h>
+
+#include <media/hardware/VideoAPI.h>
+#include <utils/Errors.h>
+
+namespace android {
+
+/**
+ * Converts an RGB view to planar YUV 420 media image.
+ *
+ * \param dstY pointer to media image buffer
+ * \param dstStride stride in bytes
+ * \param dstVStride vertical stride in pixels
+ * \param bufferSize media image buffer size
+ * \param src source image
+ *
+ * \retval NO_MEMORY media image is too small
+ * \retval OK on success
+ */
+status_t ConvertRGBToPlanarYUV(
+ uint8_t *dstY, size_t dstStride, size_t dstVStride, size_t bufferSize,
+ const C2GraphicView &src);
+
+/**
+ * Returns a planar YUV 420 8-bit media image descriptor.
+ *
+ * \param width width of image in pixels
+ * \param height height of image in pixels
+ * \param stride stride of image in pixels
+ * \param vstride vertical stride of image in pixels
+ */
+MediaImage2 CreateYUV420PlanarMediaImage2(
+ uint32_t width, uint32_t height, uint32_t stride, uint32_t vstride);
+
+/**
+ * Returns a semiplanar YUV 420 8-bit media image descriptor.
+ *
+ * \param width width of image in pixels
+ * \param height height of image in pixels
+ * \param stride stride of image in pixels
+ * \param vstride vertical stride of image in pixels
+ */
+MediaImage2 CreateYUV420SemiPlanarMediaImage2(
+ uint32_t width, uint32_t height, uint32_t stride, uint32_t vstride);
+
+/**
+ * Copies a graphic view into a media image.
+ *
+ * \param imgBase base of MediaImage
+ * \param img MediaImage data
+ * \param view graphic view
+ *
+ * \return OK on success
+ */
+status_t ImageCopy(uint8_t *imgBase, const MediaImage2 *img, const C2GraphicView &view);
+
+/**
+ * Copies a media image into a graphic view.
+ *
+ * \param view graphic view
+ * \param imgBase base of MediaImage
+ * \param img MediaImage data
+ *
+ * \return OK on success
+ */
+status_t ImageCopy(C2GraphicView &view, const uint8_t *imgBase, const MediaImage2 *img);
+
+/**
+ * Returns true iff a view has a YUV 420 888 layout.
+ */
+bool IsYUV420(const C2GraphicView &view);
+
+/**
+ * A raw memory block to use for internal buffers.
+ *
+ * TODO: replace this with C2LinearBlocks from a private C2BlockPool
+ */
+// A pooled block of raw memory. Copies of a MemoryBlock share the same
+// underlying storage (via a shared Impl); the storage is returned to its
+// pool when the last copy is destroyed.
+struct MemoryBlock : public C2MemoryBlock<uint8_t> {
+ virtual const uint8_t* data() const override;
+ virtual size_t size() const override;
+
+ // mutable access to the same storage as the const data() overload
+ inline uint8_t *data() {
+ return const_cast<uint8_t*>(const_cast<const MemoryBlock*>(this)->data());
+ }
+
+ // allocates an unmanaged block (not in a pool)
+ static MemoryBlock Allocate(size_t);
+
+ // memory block with no actual memory (size is 0, data is null)
+ MemoryBlock();
+
+ struct Impl;
+ MemoryBlock(std::shared_ptr<Impl> impl);
+ virtual ~MemoryBlock();
+
+private:
+ std::shared_ptr<Impl> mImpl;
+};
+
+/**
+ * A raw memory mini-pool.
+ */
+// A raw memory mini-pool. The pool's state is reference-counted, so fetched
+// MemoryBlocks remain valid even if the MemoryBlockPool object is destroyed.
+struct MemoryBlockPool {
+ /**
+ * Fetches a block with a given size.
+ *
+ * \param size size in bytes
+ */
+ MemoryBlock fetch(size_t size);
+
+ MemoryBlockPool();
+ ~MemoryBlockPool() = default;
+
+private:
+ struct Impl;
+ std::shared_ptr<Impl> mImpl;
+};
+
+} // namespace android
+
+#endif // CODEC2_BUFFER_UTILS_H_
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.cpp b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
new file mode 100644
index 0000000..97e17e8
--- /dev/null
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
@@ -0,0 +1,815 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "Codec2Mapper"
+#include <utils/Log.h>
+
+#include <media/stagefright/MediaCodecConstants.h>
+#include <media/stagefright/SurfaceUtils.h>
+#include <media/stagefright/foundation/ALookup.h>
+#include <media/stagefright/foundation/ColorUtils.h>
+#include <media/stagefright/foundation/MediaDefs.h>
+
+#include <stdint.h> // for INT32_MAX
+
+#include "Codec2Mapper.h"
+
+using namespace android;
+
+namespace {
+
+ALookup<C2Config::profile_t, int32_t> sAacProfiles = {
+ { C2Config::PROFILE_AAC_LC, AACObjectLC },
+ { C2Config::PROFILE_AAC_MAIN, AACObjectMain },
+ { C2Config::PROFILE_AAC_SSR, AACObjectSSR },
+ { C2Config::PROFILE_AAC_LTP, AACObjectLTP },
+ { C2Config::PROFILE_AAC_HE, AACObjectHE },
+ { C2Config::PROFILE_AAC_SCALABLE, AACObjectScalable },
+ { C2Config::PROFILE_AAC_ER_LC, AACObjectERLC },
+ { C2Config::PROFILE_AAC_ER_SCALABLE, AACObjectERScalable },
+ { C2Config::PROFILE_AAC_LD, AACObjectLD },
+ { C2Config::PROFILE_AAC_HE_PS, AACObjectHE_PS },
+ { C2Config::PROFILE_AAC_ELD, AACObjectELD },
+ { C2Config::PROFILE_AAC_XHE, AACObjectXHE },
+};
+
+// Maps Codec2 AVC levels to framework (MediaCodecConstants.h) AVC levels.
+// FIX: removed a stray blank line inside the initializer list, for
+// consistency with the other tables in this file.
+ALookup<C2Config::level_t, int32_t> sAvcLevels = {
+ { C2Config::LEVEL_AVC_1, AVCLevel1 },
+ { C2Config::LEVEL_AVC_1B, AVCLevel1b },
+ { C2Config::LEVEL_AVC_1_1, AVCLevel11 },
+ { C2Config::LEVEL_AVC_1_2, AVCLevel12 },
+ { C2Config::LEVEL_AVC_1_3, AVCLevel13 },
+ { C2Config::LEVEL_AVC_2, AVCLevel2 },
+ { C2Config::LEVEL_AVC_2_1, AVCLevel21 },
+ { C2Config::LEVEL_AVC_2_2, AVCLevel22 },
+ { C2Config::LEVEL_AVC_3, AVCLevel3 },
+ { C2Config::LEVEL_AVC_3_1, AVCLevel31 },
+ { C2Config::LEVEL_AVC_3_2, AVCLevel32 },
+ { C2Config::LEVEL_AVC_4, AVCLevel4 },
+ { C2Config::LEVEL_AVC_4_1, AVCLevel41 },
+ { C2Config::LEVEL_AVC_4_2, AVCLevel42 },
+ { C2Config::LEVEL_AVC_5, AVCLevel5 },
+ { C2Config::LEVEL_AVC_5_1, AVCLevel51 },
+ { C2Config::LEVEL_AVC_5_2, AVCLevel52 },
+};
+
+ALookup<C2Config::profile_t, int32_t> sAvcProfiles = {
+ // treat restricted profiles as full profile if there is no equivalent - which works for
+ // decoders, but not for encoders
+ { C2Config::PROFILE_AVC_BASELINE, AVCProfileBaseline },
+ { C2Config::PROFILE_AVC_CONSTRAINED_BASELINE, AVCProfileConstrainedBaseline },
+ { C2Config::PROFILE_AVC_MAIN, AVCProfileMain },
+ { C2Config::PROFILE_AVC_EXTENDED, AVCProfileExtended },
+ { C2Config::PROFILE_AVC_HIGH, AVCProfileHigh },
+ { C2Config::PROFILE_AVC_PROGRESSIVE_HIGH, AVCProfileHigh },
+ { C2Config::PROFILE_AVC_CONSTRAINED_HIGH, AVCProfileConstrainedHigh },
+ { C2Config::PROFILE_AVC_HIGH_10, AVCProfileHigh10 },
+ { C2Config::PROFILE_AVC_PROGRESSIVE_HIGH_10, AVCProfileHigh10 },
+ { C2Config::PROFILE_AVC_HIGH_422, AVCProfileHigh422 },
+ { C2Config::PROFILE_AVC_HIGH_444_PREDICTIVE, AVCProfileHigh444 },
+ { C2Config::PROFILE_AVC_HIGH_10_INTRA, AVCProfileHigh10 },
+ { C2Config::PROFILE_AVC_HIGH_422_INTRA, AVCProfileHigh422 },
+ { C2Config::PROFILE_AVC_HIGH_444_INTRA, AVCProfileHigh444 },
+ { C2Config::PROFILE_AVC_CAVLC_444_INTRA, AVCProfileHigh444 },
+};
+
+// Maps Codec2 bitrate modes to framework bitrate modes. Note that
+// BITRATE_IGNORE pairs with constant-quality (CQ) mode.
+ALookup<C2Config::bitrate_mode_t, int32_t> sBitrateModes = {
+ { C2Config::BITRATE_CONST, BITRATE_MODE_CBR },
+ { C2Config::BITRATE_VARIABLE, BITRATE_MODE_VBR },
+ { C2Config::BITRATE_IGNORE, BITRATE_MODE_CQ },
+};
+
+ALookup<C2Color::matrix_t, ColorAspects::MatrixCoeffs> sColorMatricesSf = {
+ { C2Color::MATRIX_UNSPECIFIED, ColorAspects::MatrixUnspecified },
+ { C2Color::MATRIX_BT709, ColorAspects::MatrixBT709_5 },
+ { C2Color::MATRIX_FCC47_73_682, ColorAspects::MatrixBT470_6M },
+ { C2Color::MATRIX_BT601, ColorAspects::MatrixBT601_6 },
+ { C2Color::MATRIX_SMPTE240M, ColorAspects::MatrixSMPTE240M },
+ { C2Color::MATRIX_BT2020, ColorAspects::MatrixBT2020 },
+ { C2Color::MATRIX_BT2020_CONSTANT, ColorAspects::MatrixBT2020Constant },
+ { C2Color::MATRIX_OTHER, ColorAspects::MatrixOther },
+};
+
+ALookup<C2Color::primaries_t, ColorAspects::Primaries> sColorPrimariesSf = {
+ { C2Color::PRIMARIES_UNSPECIFIED, ColorAspects::PrimariesUnspecified },
+ { C2Color::PRIMARIES_BT709, ColorAspects::PrimariesBT709_5 },
+ { C2Color::PRIMARIES_BT470_M, ColorAspects::PrimariesBT470_6M },
+ { C2Color::PRIMARIES_BT601_625, ColorAspects::PrimariesBT601_6_625 },
+ { C2Color::PRIMARIES_BT601_525, ColorAspects::PrimariesBT601_6_525 },
+ { C2Color::PRIMARIES_GENERIC_FILM, ColorAspects::PrimariesGenericFilm },
+ { C2Color::PRIMARIES_BT2020, ColorAspects::PrimariesBT2020 },
+// { C2Color::PRIMARIES_RP431, ColorAspects::Primaries... },
+// { C2Color::PRIMARIES_EG432, ColorAspects::Primaries... },
+// { C2Color::PRIMARIES_EBU3213, ColorAspects::Primaries... },
+ { C2Color::PRIMARIES_OTHER, ColorAspects::PrimariesOther },
+};
+
+// Maps Codec2 color ranges to framework (int32) color range constants;
+// unspecified/other have no framework equivalent and are absent here.
+ALookup<C2Color::range_t, int32_t> sColorRanges = {
+ { C2Color::RANGE_FULL, COLOR_RANGE_FULL },
+ { C2Color::RANGE_LIMITED, COLOR_RANGE_LIMITED },
+};
+
+ALookup<C2Color::range_t, ColorAspects::Range> sColorRangesSf = {
+ { C2Color::RANGE_UNSPECIFIED, ColorAspects::RangeUnspecified },
+ { C2Color::RANGE_FULL, ColorAspects::RangeFull },
+ { C2Color::RANGE_LIMITED, ColorAspects::RangeLimited },
+ { C2Color::RANGE_OTHER, ColorAspects::RangeOther },
+};
+
+ALookup<C2Color::transfer_t, int32_t> sColorTransfers = {
+ { C2Color::TRANSFER_LINEAR, COLOR_TRANSFER_LINEAR },
+ { C2Color::TRANSFER_170M, COLOR_TRANSFER_SDR_VIDEO },
+ { C2Color::TRANSFER_ST2084, COLOR_TRANSFER_ST2084 },
+ { C2Color::TRANSFER_HLG, COLOR_TRANSFER_HLG },
+};
+
+ALookup<C2Color::transfer_t, ColorAspects::Transfer> sColorTransfersSf = {
+ { C2Color::TRANSFER_UNSPECIFIED, ColorAspects::TransferUnspecified },
+ { C2Color::TRANSFER_LINEAR, ColorAspects::TransferLinear },
+ { C2Color::TRANSFER_SRGB, ColorAspects::TransferSRGB },
+ { C2Color::TRANSFER_170M, ColorAspects::TransferSMPTE170M },
+ { C2Color::TRANSFER_GAMMA22, ColorAspects::TransferGamma22 },
+ { C2Color::TRANSFER_GAMMA28, ColorAspects::TransferGamma28 },
+ { C2Color::TRANSFER_ST2084, ColorAspects::TransferST2084 },
+ { C2Color::TRANSFER_HLG, ColorAspects::TransferHLG },
+ { C2Color::TRANSFER_240M, ColorAspects::TransferSMPTE240M },
+ { C2Color::TRANSFER_XVYCC, ColorAspects::TransferXvYCC },
+ { C2Color::TRANSFER_BT1361, ColorAspects::TransferBT1361 },
+ { C2Color::TRANSFER_ST428, ColorAspects::TransferST428 },
+ { C2Color::TRANSFER_OTHER, ColorAspects::TransferOther },
+};
+
+ALookup<C2Config::level_t, int32_t> sDolbyVisionLevels = {
+ { C2Config::LEVEL_DV_MAIN_HD_24, DolbyVisionLevelHd24 },
+ { C2Config::LEVEL_DV_MAIN_HD_30, DolbyVisionLevelHd30 },
+ { C2Config::LEVEL_DV_MAIN_FHD_24, DolbyVisionLevelFhd24 },
+ { C2Config::LEVEL_DV_MAIN_FHD_30, DolbyVisionLevelFhd30 },
+ { C2Config::LEVEL_DV_MAIN_FHD_60, DolbyVisionLevelFhd60 },
+ { C2Config::LEVEL_DV_MAIN_UHD_24, DolbyVisionLevelUhd24 },
+ { C2Config::LEVEL_DV_MAIN_UHD_30, DolbyVisionLevelUhd30 },
+ { C2Config::LEVEL_DV_MAIN_UHD_48, DolbyVisionLevelUhd48 },
+ { C2Config::LEVEL_DV_MAIN_UHD_60, DolbyVisionLevelUhd60 },
+
+ // high tiers are not yet supported on android, for now map them to main tier
+ { C2Config::LEVEL_DV_HIGH_HD_24, DolbyVisionLevelHd24 },
+ { C2Config::LEVEL_DV_HIGH_HD_30, DolbyVisionLevelHd30 },
+ { C2Config::LEVEL_DV_HIGH_FHD_24, DolbyVisionLevelFhd24 },
+ { C2Config::LEVEL_DV_HIGH_FHD_30, DolbyVisionLevelFhd30 },
+ { C2Config::LEVEL_DV_HIGH_FHD_60, DolbyVisionLevelFhd60 },
+ { C2Config::LEVEL_DV_HIGH_UHD_24, DolbyVisionLevelUhd24 },
+ { C2Config::LEVEL_DV_HIGH_UHD_30, DolbyVisionLevelUhd30 },
+ { C2Config::LEVEL_DV_HIGH_UHD_48, DolbyVisionLevelUhd48 },
+ { C2Config::LEVEL_DV_HIGH_UHD_60, DolbyVisionLevelUhd60 },
+};
+
+ALookup<C2Config::profile_t, int32_t> sDolbyVisionProfiles = {
+ { C2Config::PROFILE_DV_AV_PER, DolbyVisionProfileDvavPer },
+ { C2Config::PROFILE_DV_AV_PEN, DolbyVisionProfileDvavPen },
+ { C2Config::PROFILE_DV_HE_DER, DolbyVisionProfileDvheDer },
+ { C2Config::PROFILE_DV_HE_DEN, DolbyVisionProfileDvheDen },
+ { C2Config::PROFILE_DV_HE_04, DolbyVisionProfileDvheDtr },
+ { C2Config::PROFILE_DV_HE_05, DolbyVisionProfileDvheStn },
+ { C2Config::PROFILE_DV_HE_DTH, DolbyVisionProfileDvheDth },
+ { C2Config::PROFILE_DV_HE_07, DolbyVisionProfileDvheDtb },
+ { C2Config::PROFILE_DV_HE_08, DolbyVisionProfileDvheSt },
+ { C2Config::PROFILE_DV_AV_09, DolbyVisionProfileDvavSe },
+};
+
+ALookup<C2Config::level_t, int32_t> sH263Levels = {
+ { C2Config::LEVEL_H263_10, H263Level10 },
+ { C2Config::LEVEL_H263_20, H263Level20 },
+ { C2Config::LEVEL_H263_30, H263Level30 },
+ { C2Config::LEVEL_H263_40, H263Level40 },
+ { C2Config::LEVEL_H263_45, H263Level45 },
+ { C2Config::LEVEL_H263_50, H263Level50 },
+ { C2Config::LEVEL_H263_60, H263Level60 },
+ { C2Config::LEVEL_H263_70, H263Level70 },
+};
+
+ALookup<C2Config::profile_t, int32_t> sH263Profiles = {
+ { C2Config::PROFILE_H263_BASELINE, H263ProfileBaseline },
+ { C2Config::PROFILE_H263_H320, H263ProfileH320Coding },
+ { C2Config::PROFILE_H263_V1BC, H263ProfileBackwardCompatible },
+ { C2Config::PROFILE_H263_ISWV2, H263ProfileISWV2 },
+ { C2Config::PROFILE_H263_ISWV3, H263ProfileISWV3 },
+ { C2Config::PROFILE_H263_HIGH_COMPRESSION, H263ProfileHighCompression },
+ { C2Config::PROFILE_H263_INTERNET, H263ProfileInternet },
+ { C2Config::PROFILE_H263_INTERLACE, H263ProfileInterlace },
+ { C2Config::PROFILE_H263_HIGH_LATENCY, H263ProfileHighLatency },
+};
+
+ALookup<C2Config::level_t, int32_t> sHevcLevels = {
+ { C2Config::LEVEL_HEVC_MAIN_1, HEVCMainTierLevel1 },
+ { C2Config::LEVEL_HEVC_MAIN_2, HEVCMainTierLevel2 },
+ { C2Config::LEVEL_HEVC_MAIN_2_1, HEVCMainTierLevel21 },
+ { C2Config::LEVEL_HEVC_MAIN_3, HEVCMainTierLevel3 },
+ { C2Config::LEVEL_HEVC_MAIN_3_1, HEVCMainTierLevel31 },
+ { C2Config::LEVEL_HEVC_MAIN_4, HEVCMainTierLevel4 },
+ { C2Config::LEVEL_HEVC_MAIN_4_1, HEVCMainTierLevel41 },
+ { C2Config::LEVEL_HEVC_MAIN_5, HEVCMainTierLevel5 },
+ { C2Config::LEVEL_HEVC_MAIN_5_1, HEVCMainTierLevel51 },
+ { C2Config::LEVEL_HEVC_MAIN_5_2, HEVCMainTierLevel52 },
+ { C2Config::LEVEL_HEVC_MAIN_6, HEVCMainTierLevel6 },
+ { C2Config::LEVEL_HEVC_MAIN_6_1, HEVCMainTierLevel61 },
+ { C2Config::LEVEL_HEVC_MAIN_6_2, HEVCMainTierLevel62 },
+
+ { C2Config::LEVEL_HEVC_HIGH_4, HEVCHighTierLevel4 },
+ { C2Config::LEVEL_HEVC_HIGH_4_1, HEVCHighTierLevel41 },
+ { C2Config::LEVEL_HEVC_HIGH_5, HEVCHighTierLevel5 },
+ { C2Config::LEVEL_HEVC_HIGH_5_1, HEVCHighTierLevel51 },
+ { C2Config::LEVEL_HEVC_HIGH_5_2, HEVCHighTierLevel52 },
+ { C2Config::LEVEL_HEVC_HIGH_6, HEVCHighTierLevel6 },
+ { C2Config::LEVEL_HEVC_HIGH_6_1, HEVCHighTierLevel61 },
+ { C2Config::LEVEL_HEVC_HIGH_6_2, HEVCHighTierLevel62 },
+
+ // map high tier levels below 4 to main tier
+ { C2Config::LEVEL_HEVC_MAIN_1, HEVCHighTierLevel1 },
+ { C2Config::LEVEL_HEVC_MAIN_2, HEVCHighTierLevel2 },
+ { C2Config::LEVEL_HEVC_MAIN_2_1, HEVCHighTierLevel21 },
+ { C2Config::LEVEL_HEVC_MAIN_3, HEVCHighTierLevel3 },
+ { C2Config::LEVEL_HEVC_MAIN_3_1, HEVCHighTierLevel31 },
+};
+
+ALookup<C2Config::profile_t, int32_t> sHevcProfiles = {
+ { C2Config::PROFILE_HEVC_MAIN, HEVCProfileMain },
+ { C2Config::PROFILE_HEVC_MAIN_10, HEVCProfileMain10 },
+ { C2Config::PROFILE_HEVC_MAIN_STILL, HEVCProfileMainStill },
+ { C2Config::PROFILE_HEVC_MAIN_INTRA, HEVCProfileMain },
+ { C2Config::PROFILE_HEVC_MAIN_10_INTRA, HEVCProfileMain10 },
+};
+
+ALookup<C2Config::level_t, int32_t> sMpeg2Levels = {
+ { C2Config::LEVEL_MP2V_LOW, MPEG2LevelLL },
+ { C2Config::LEVEL_MP2V_MAIN, MPEG2LevelML },
+ { C2Config::LEVEL_MP2V_HIGH_1440, MPEG2LevelH14 },
+ { C2Config::LEVEL_MP2V_HIGH, MPEG2LevelHL },
+ { C2Config::LEVEL_MP2V_HIGHP, MPEG2LevelHP },
+};
+
+ALookup<C2Config::profile_t, int32_t> sMpeg2Profiles = {
+ { C2Config::PROFILE_MP2V_SIMPLE, MPEG2ProfileSimple },
+ { C2Config::PROFILE_MP2V_MAIN, MPEG2ProfileMain },
+ { C2Config::PROFILE_MP2V_SNR_SCALABLE, MPEG2ProfileSNR },
+ { C2Config::PROFILE_MP2V_SPATIALLY_SCALABLE, MPEG2ProfileSpatial },
+ { C2Config::PROFILE_MP2V_HIGH, MPEG2ProfileHigh },
+ { C2Config::PROFILE_MP2V_422, MPEG2Profile422 },
+};
+
+ALookup<C2Config::level_t, int32_t> sMpeg4Levels = {
+ { C2Config::LEVEL_MP4V_0, MPEG4Level0 },
+ { C2Config::LEVEL_MP4V_0B, MPEG4Level0b },
+ { C2Config::LEVEL_MP4V_1, MPEG4Level1 },
+ { C2Config::LEVEL_MP4V_2, MPEG4Level2 },
+ { C2Config::LEVEL_MP4V_3, MPEG4Level3 },
+ { C2Config::LEVEL_MP4V_3B, MPEG4Level3b },
+ { C2Config::LEVEL_MP4V_4, MPEG4Level4 },
+ { C2Config::LEVEL_MP4V_4A, MPEG4Level4a },
+ { C2Config::LEVEL_MP4V_5, MPEG4Level5 },
+ { C2Config::LEVEL_MP4V_6, MPEG4Level6 },
+};
+
+ALookup<C2Config::profile_t, int32_t> sMpeg4Profiles = {
+ { C2Config::PROFILE_MP4V_SIMPLE, MPEG4ProfileSimple },
+ { C2Config::PROFILE_MP4V_SIMPLE_SCALABLE, MPEG4ProfileSimpleScalable },
+ { C2Config::PROFILE_MP4V_CORE, MPEG4ProfileCore },
+ { C2Config::PROFILE_MP4V_MAIN, MPEG4ProfileMain },
+ { C2Config::PROFILE_MP4V_NBIT, MPEG4ProfileNbit },
+ { C2Config::PROFILE_MP4V_ARTS, MPEG4ProfileAdvancedRealTime },
+ { C2Config::PROFILE_MP4V_CORE_SCALABLE, MPEG4ProfileCoreScalable },
+ { C2Config::PROFILE_MP4V_ACE, MPEG4ProfileAdvancedCoding },
+ { C2Config::PROFILE_MP4V_ADVANCED_CORE, MPEG4ProfileAdvancedCore },
+ { C2Config::PROFILE_MP4V_ADVANCED_SIMPLE, MPEG4ProfileAdvancedSimple },
+};
+
+// Maps Codec2 PCM encodings to framework audio encoding constants.
+ALookup<C2Config::pcm_encoding_t, int32_t> sPcmEncodings = {
+ { C2Config::PCM_8, kAudioEncodingPcm8bit },
+ { C2Config::PCM_16, kAudioEncodingPcm16bit },
+ { C2Config::PCM_FLOAT, kAudioEncodingPcmFloat },
+};
+
+ALookup<C2Config::level_t, int32_t> sVp9Levels = {
+ { C2Config::LEVEL_VP9_1, VP9Level1 },
+ { C2Config::LEVEL_VP9_1_1, VP9Level11 },
+ { C2Config::LEVEL_VP9_2, VP9Level2 },
+ { C2Config::LEVEL_VP9_2_1, VP9Level21 },
+ { C2Config::LEVEL_VP9_3, VP9Level3 },
+ { C2Config::LEVEL_VP9_3_1, VP9Level31 },
+ { C2Config::LEVEL_VP9_4, VP9Level4 },
+ { C2Config::LEVEL_VP9_4_1, VP9Level41 },
+ { C2Config::LEVEL_VP9_5, VP9Level5 },
+ { C2Config::LEVEL_VP9_5_1, VP9Level51 },
+ { C2Config::LEVEL_VP9_5_2, VP9Level52 },
+ { C2Config::LEVEL_VP9_6, VP9Level6 },
+ { C2Config::LEVEL_VP9_6_1, VP9Level61 },
+ { C2Config::LEVEL_VP9_6_2, VP9Level62 },
+};
+
+// Maps Codec2 VP9 profiles to framework VP9 profile constants.
+ALookup<C2Config::profile_t, int32_t> sVp9Profiles = {
+ { C2Config::PROFILE_VP9_0, VP9Profile0 },
+ { C2Config::PROFILE_VP9_1, VP9Profile1 },
+ { C2Config::PROFILE_VP9_2, VP9Profile2 },
+ { C2Config::PROFILE_VP9_3, VP9Profile3 },
+};
+
+/**
+ * A helper that passes through vendor extension profile and level values.
+ */
+struct ProfileLevelMapperHelper : C2Mapper::ProfileLevelMapper {
+ // Codec-specific mappings supplied by subclasses.
+ virtual bool simpleMap(C2Config::level_t from, int32_t *to) = 0;
+ virtual bool simpleMap(int32_t from, C2Config::level_t *to) = 0;
+ virtual bool simpleMap(C2Config::profile_t from, int32_t *to) = 0;
+ virtual bool simpleMap(int32_t from, C2Config::profile_t *to) = 0;
+
+ // Values in the vendor-extension range are passed through numerically
+ // unchanged; everything else goes through the codec-specific table.
+ template<typename T, typename U>
+ bool passThroughMap(T from, U *to) {
+ // allow (and pass through) vendor extensions
+ if (from >= (T)C2_PROFILE_LEVEL_VENDOR_START && from < (T)INT32_MAX) {
+ *to = (U)from;
+ return true;
+ }
+ return simpleMap(from, to);
+ }
+
+ virtual bool mapLevel(C2Config::level_t from, int32_t *to) {
+ return passThroughMap(from, to);
+ }
+
+ virtual bool mapLevel(int32_t from, C2Config::level_t *to) {
+ return passThroughMap(from, to);
+ }
+
+ virtual bool mapProfile(C2Config::profile_t from, int32_t *to) {
+ return passThroughMap(from, to);
+ }
+
+ virtual bool mapProfile(int32_t from, C2Config::profile_t *to) {
+ return passThroughMap(from, to);
+ }
+};
+
+// AAC only uses profiles, map all levels to unused or 0
+struct AacProfileLevelMapper : ProfileLevelMapperHelper {
+    // Profiles go through the shared AAC lookup table.
+    bool simpleMap(C2Config::profile_t profile, int32_t *sdkProfile) override {
+        return sAacProfiles.map(profile, sdkProfile);
+    }
+    bool simpleMap(int32_t sdkProfile, C2Config::profile_t *profile) override {
+        return sAacProfiles.map(sdkProfile, profile);
+    }
+    // Levels are meaningless for AAC: every level becomes 0 on the SDK side
+    // and LEVEL_UNUSED on the Codec 2.0 side.
+    bool simpleMap(C2Config::level_t, int32_t *sdkLevel) override {
+        *sdkLevel = 0;
+        return true;
+    }
+    bool simpleMap(int32_t, C2Config::level_t *level) override {
+        *level = C2Config::LEVEL_UNUSED;
+        return true;
+    }
+};
+
+// AVC (H.264): profiles and levels delegate to the shared AVC lookup tables.
+struct AvcProfileLevelMapper : ProfileLevelMapperHelper {
+    bool simpleMap(C2Config::profile_t profile, int32_t *sdkProfile) override {
+        return sAvcProfiles.map(profile, sdkProfile);
+    }
+    bool simpleMap(int32_t sdkProfile, C2Config::profile_t *profile) override {
+        return sAvcProfiles.map(sdkProfile, profile);
+    }
+    bool simpleMap(C2Config::level_t level, int32_t *sdkLevel) override {
+        return sAvcLevels.map(level, sdkLevel);
+    }
+    bool simpleMap(int32_t sdkLevel, C2Config::level_t *level) override {
+        return sAvcLevels.map(sdkLevel, level);
+    }
+};
+
+// Dolby Vision: profiles and levels delegate to the shared lookup tables.
+struct DolbyVisionProfileLevelMapper : ProfileLevelMapperHelper {
+    bool simpleMap(C2Config::profile_t profile, int32_t *sdkProfile) override {
+        return sDolbyVisionProfiles.map(profile, sdkProfile);
+    }
+    bool simpleMap(int32_t sdkProfile, C2Config::profile_t *profile) override {
+        return sDolbyVisionProfiles.map(sdkProfile, profile);
+    }
+    bool simpleMap(C2Config::level_t level, int32_t *sdkLevel) override {
+        return sDolbyVisionLevels.map(level, sdkLevel);
+    }
+    bool simpleMap(int32_t sdkLevel, C2Config::level_t *level) override {
+        return sDolbyVisionLevels.map(sdkLevel, level);
+    }
+};
+
+// H.263: profiles and levels delegate to the shared lookup tables.
+struct H263ProfileLevelMapper : ProfileLevelMapperHelper {
+    bool simpleMap(C2Config::profile_t profile, int32_t *sdkProfile) override {
+        return sH263Profiles.map(profile, sdkProfile);
+    }
+    bool simpleMap(int32_t sdkProfile, C2Config::profile_t *profile) override {
+        return sH263Profiles.map(sdkProfile, profile);
+    }
+    bool simpleMap(C2Config::level_t level, int32_t *sdkLevel) override {
+        return sH263Levels.map(level, sdkLevel);
+    }
+    bool simpleMap(int32_t sdkLevel, C2Config::level_t *level) override {
+        return sH263Levels.map(sdkLevel, level);
+    }
+};
+
+// HEVC (H.265): profiles and levels delegate to the shared lookup tables.
+struct HevcProfileLevelMapper : ProfileLevelMapperHelper {
+    bool simpleMap(C2Config::profile_t profile, int32_t *sdkProfile) override {
+        return sHevcProfiles.map(profile, sdkProfile);
+    }
+    bool simpleMap(int32_t sdkProfile, C2Config::profile_t *profile) override {
+        return sHevcProfiles.map(sdkProfile, profile);
+    }
+    bool simpleMap(C2Config::level_t level, int32_t *sdkLevel) override {
+        return sHevcLevels.map(level, sdkLevel);
+    }
+    bool simpleMap(int32_t sdkLevel, C2Config::level_t *level) override {
+        return sHevcLevels.map(sdkLevel, level);
+    }
+};
+
+// MPEG-2: profiles and levels delegate to the shared lookup tables.
+struct Mpeg2ProfileLevelMapper : ProfileLevelMapperHelper {
+    bool simpleMap(C2Config::profile_t profile, int32_t *sdkProfile) override {
+        return sMpeg2Profiles.map(profile, sdkProfile);
+    }
+    bool simpleMap(int32_t sdkProfile, C2Config::profile_t *profile) override {
+        return sMpeg2Profiles.map(sdkProfile, profile);
+    }
+    bool simpleMap(C2Config::level_t level, int32_t *sdkLevel) override {
+        return sMpeg2Levels.map(level, sdkLevel);
+    }
+    bool simpleMap(int32_t sdkLevel, C2Config::level_t *level) override {
+        return sMpeg2Levels.map(sdkLevel, level);
+    }
+};
+
+// MPEG-4 Video: profiles and levels delegate to the shared lookup tables.
+struct Mpeg4ProfileLevelMapper : ProfileLevelMapperHelper {
+    bool simpleMap(C2Config::profile_t profile, int32_t *sdkProfile) override {
+        return sMpeg4Profiles.map(profile, sdkProfile);
+    }
+    bool simpleMap(int32_t sdkProfile, C2Config::profile_t *profile) override {
+        return sMpeg4Profiles.map(sdkProfile, profile);
+    }
+    bool simpleMap(C2Config::level_t level, int32_t *sdkLevel) override {
+        return sMpeg4Levels.map(level, sdkLevel);
+    }
+    bool simpleMap(int32_t sdkLevel, C2Config::level_t *level) override {
+        return sMpeg4Levels.map(sdkLevel, level);
+    }
+};
+
+// VP8 has no profiles and levels in Codec 2.0, but we use main profile and level 0 in MediaCodec
+// map all profiles and levels to that.
+struct Vp8ProfileLevelMapper : ProfileLevelMapperHelper {
+    // Every Codec 2.0 profile maps to the single SDK VP8 main profile.
+    bool simpleMap(C2Config::profile_t, int32_t *sdkProfile) override {
+        *sdkProfile = VP8ProfileMain;
+        return true;
+    }
+    bool simpleMap(int32_t, C2Config::profile_t *profile) override {
+        *profile = C2Config::PROFILE_UNUSED;
+        return true;
+    }
+    // Every Codec 2.0 level maps to the single SDK VP8 version-0 level.
+    bool simpleMap(C2Config::level_t, int32_t *sdkLevel) override {
+        *sdkLevel = VP8Level_Version0;
+        return true;
+    }
+    bool simpleMap(int32_t, C2Config::level_t *level) override {
+        *level = C2Config::LEVEL_UNUSED;
+        return true;
+    }
+};
+
+// VP9: profiles and levels delegate to the shared lookup tables.
+struct Vp9ProfileLevelMapper : ProfileLevelMapperHelper {
+    bool simpleMap(C2Config::profile_t profile, int32_t *sdkProfile) override {
+        return sVp9Profiles.map(profile, sdkProfile);
+    }
+    bool simpleMap(int32_t sdkProfile, C2Config::profile_t *profile) override {
+        return sVp9Profiles.map(sdkProfile, profile);
+    }
+    bool simpleMap(C2Config::level_t level, int32_t *sdkLevel) override {
+        return sVp9Levels.map(level, sdkLevel);
+    }
+    bool simpleMap(int32_t sdkLevel, C2Config::level_t *level) override {
+        return sVp9Levels.map(sdkLevel, level);
+    }
+};
+
+} // namespace
+
+// static
+// Returns the profile/level mapper for |mediaType| (case-insensitive), or
+// nullptr if the media type has no mapper.
+std::shared_ptr<C2Mapper::ProfileLevelMapper>
+C2Mapper::GetProfileLevelMapper(std::string mediaType) {
+    // Normalize to lower case so the comparisons below are case-insensitive.
+    // Fixed: arguments were (begin, begin, end, tolower), which transforms an
+    // empty range and left mediaType unchanged; std::transform takes
+    // (first, last, d_first, op).
+    std::transform(mediaType.begin(), mediaType.end(), mediaType.begin(), ::tolower);
+    if (mediaType == MIMETYPE_AUDIO_AAC) {
+        return std::make_shared<AacProfileLevelMapper>();
+    } else if (mediaType == MIMETYPE_VIDEO_AVC) {
+        return std::make_shared<AvcProfileLevelMapper>();
+    } else if (mediaType == MIMETYPE_VIDEO_DOLBY_VISION) {
+        return std::make_shared<DolbyVisionProfileLevelMapper>();
+    } else if (mediaType == MIMETYPE_VIDEO_H263) {
+        return std::make_shared<H263ProfileLevelMapper>();
+    } else if (mediaType == MIMETYPE_VIDEO_HEVC) {
+        return std::make_shared<HevcProfileLevelMapper>();
+    } else if (mediaType == MIMETYPE_VIDEO_MPEG2) {
+        return std::make_shared<Mpeg2ProfileLevelMapper>();
+    } else if (mediaType == MIMETYPE_VIDEO_MPEG4) {
+        return std::make_shared<Mpeg4ProfileLevelMapper>();
+    } else if (mediaType == MIMETYPE_VIDEO_VP8) {
+        return std::make_shared<Vp8ProfileLevelMapper>();
+    } else if (mediaType == MIMETYPE_VIDEO_VP9) {
+        return std::make_shared<Vp9ProfileLevelMapper>();
+    }
+    return nullptr;
+}
+
+// static
+// Maps a Codec 2.0 bitrate mode to its SDK constant; returns false if the
+// value is not in the lookup table.
+bool C2Mapper::map(C2Config::bitrate_mode_t from, int32_t *to) {
+    return sBitrateModes.map(from, to);
+}
+
+// static
+// Reverse direction: SDK bitrate-mode constant to Codec 2.0.
+bool C2Mapper::map(int32_t from, C2Config::bitrate_mode_t *to) {
+    return sBitrateModes.map(from, to);
+}
+
+// static
+// Maps a Codec 2.0 PCM encoding to its SDK AudioFormat constant; returns
+// false if the value is not in the lookup table.
+bool C2Mapper::map(C2Config::pcm_encoding_t from, int32_t *to) {
+    return sPcmEncodings.map(from, to);
+}
+
+// static
+// Reverse direction: SDK PCM-encoding constant to Codec 2.0.
+bool C2Mapper::map(int32_t from, C2Config::pcm_encoding_t *to) {
+    return sPcmEncodings.map(from, to);
+}
+
+// static
+// Maps a Codec 2.0 color range to the SDK value. SDK-defined values map
+// directly; vendor extensions pass through; anything else becomes 'Other'
+// (wrapped via ColorUtils) and the function returns false.
+bool C2Mapper::map(C2Color::range_t from, int32_t *to) {
+    bool res = true;
+    // map SDK defined values directly. For other values, use wrapping from ColorUtils.
+    if (!sColorRanges.map(from, to)) {
+        ColorAspects::Range sfRange;
+
+        // map known constants and keep vendor extensions. all other values are mapped to 'Other'
+        if (!sColorRangesSf.map(from, &sfRange)) {
+            if (from < C2Color::RANGE_VENDOR_START || from > C2Color::RANGE_OTHER) {
+                sfRange = ColorAspects::RangeOther;
+                res = false;
+            } else {
+                // use static cast for values in the vendor extension range
+                // (fixed: sfRange was previously left uninitialized on this
+                // path; mirrors the casts done by the primaries/matrix maps)
+                sfRange = (ColorAspects::Range)from;
+            }
+        }
+
+        *to = ColorUtils::wrapColorAspectsIntoColorRange(sfRange);
+    }
+    return res;
+}
+
+// static
+// Maps an SDK color-range value to Codec 2.0. SDK-defined values map
+// directly; unwrapped vendor extensions pass through via cast; anything else
+// becomes RANGE_OTHER and the function returns false.
+bool C2Mapper::map(int32_t from, C2Color::range_t *to) {
+    // map SDK defined values directly. For other values, use wrapping from ColorUtils.
+    if (!sColorRanges.map(from, to)) {
+        ColorAspects::Range sfRange;
+        (void)ColorUtils::unwrapColorAspectsFromColorRange(from, &sfRange);
+
+        // map known constants and keep vendor extensions. all other values are mapped to 'Other'
+        if (!sColorRangesSf.map(sfRange, to)) {
+            // use static cast and ensure it is in the extension range
+            *to = (C2Color::range_t)sfRange;
+            if (*to < C2Color::RANGE_VENDOR_START || *to > C2Color::RANGE_OTHER) {
+                *to = C2Color::RANGE_OTHER;
+                return false;
+            }
+        }
+    }
+
+    return true;
+}
+
+// static
+// Direct table lookup between Codec 2.0 and stagefright ColorAspects ranges
+// (no vendor-extension pass-through here).
+bool C2Mapper::map(C2Color::range_t from, ColorAspects::Range *to) {
+    return sColorRangesSf.map(from, to);
+}
+
+// static
+bool C2Mapper::map(ColorAspects::Range from, C2Color::range_t *to) {
+    return sColorRangesSf.map(from, to);
+}
+
+// static
+// Combines Codec 2.0 primaries + matrix into a single SDK color standard by
+// first converting both to stagefright ColorAspects and then wrapping via
+// ColorUtils. Returns false if either input was an unknown non-vendor value
+// (which gets mapped to 'Other'); still writes |standard| in that case.
+bool C2Mapper::map(C2Color::primaries_t primaries, C2Color::matrix_t matrix, int32_t *standard) {
+    ColorAspects::Primaries sfPrimaries;
+    ColorAspects::MatrixCoeffs sfMatrix;
+    bool res = true;
+
+    // map known constants and keep vendor extensions. all other values are mapped to 'Other'
+    if (!sColorPrimariesSf.map(primaries, &sfPrimaries)) {
+        // ensure it is in the extension range and use static cast
+        if (primaries < C2Color::PRIMARIES_VENDOR_START || primaries > C2Color::PRIMARIES_OTHER) {
+            // undefined non-extension values map to 'Other'
+            sfPrimaries = ColorAspects::PrimariesOther;
+            res = false;
+        } else {
+            sfPrimaries = (ColorAspects::Primaries)primaries;
+        }
+    }
+
+    if (!sColorMatricesSf.map(matrix, &sfMatrix)) {
+        // use static cast and ensure it is in the extension range
+        if (matrix < C2Color::MATRIX_VENDOR_START || matrix > C2Color::MATRIX_OTHER) {
+            // undefined non-extension values map to 'Other'
+            sfMatrix = ColorAspects::MatrixOther;
+            res = false;
+        } else {
+            sfMatrix = (ColorAspects::MatrixCoeffs)matrix;
+        }
+    }
+
+    *standard = ColorUtils::wrapColorAspectsIntoColorStandard(sfPrimaries, sfMatrix);
+
+    return res;
+}
+
+// static
+// Splits an SDK color standard into Codec 2.0 primaries + matrix. Returns
+// false if either component was an unknown non-vendor value (mapped to
+// 'Other'); both outputs are always written.
+bool C2Mapper::map(int32_t standard, C2Color::primaries_t *primaries, C2Color::matrix_t *matrix) {
+    // first map to stagefright foundation aspects => these actually map nearly 1:1 to
+    // Codec 2.0 aspects
+    ColorAspects::Primaries sfPrimaries;
+    ColorAspects::MatrixCoeffs sfMatrix;
+    bool res = true;
+    (void)ColorUtils::unwrapColorAspectsFromColorStandard(standard, &sfPrimaries, &sfMatrix);
+
+    // map known constants and keep vendor extensions. all other values are mapped to 'Other'
+    if (!sColorPrimariesSf.map(sfPrimaries, primaries)) {
+        // use static cast and ensure it is in the extension range
+        *primaries = (C2Color::primaries_t)sfPrimaries;
+        if (*primaries < C2Color::PRIMARIES_VENDOR_START || *primaries > C2Color::PRIMARIES_OTHER) {
+            *primaries = C2Color::PRIMARIES_OTHER;
+            res = false;
+        }
+    }
+
+    if (!sColorMatricesSf.map(sfMatrix, matrix)) {
+        // use static cast and ensure it is in the extension range
+        *matrix = (C2Color::matrix_t)sfMatrix;
+        if (*matrix < C2Color::MATRIX_VENDOR_START || *matrix > C2Color::MATRIX_OTHER) {
+            *matrix = C2Color::MATRIX_OTHER;
+            res = false;
+        }
+    }
+
+    return res;
+}
+
+// static
+// Direct table lookup between Codec 2.0 and stagefright ColorAspects
+// primaries (no vendor-extension pass-through here).
+bool C2Mapper::map(C2Color::primaries_t from, ColorAspects::Primaries *to) {
+    return sColorPrimariesSf.map(from, to);
+}
+
+// static
+bool C2Mapper::map(ColorAspects::Primaries from, C2Color::primaries_t *to) {
+    return sColorPrimariesSf.map(from, to);
+}
+
+// static
+// Direct table lookup between Codec 2.0 and stagefright ColorAspects
+// matrix coefficients.
+bool C2Mapper::map(C2Color::matrix_t from, ColorAspects::MatrixCoeffs *to) {
+    return sColorMatricesSf.map(from, to);
+}
+
+// static
+bool C2Mapper::map(ColorAspects::MatrixCoeffs from, C2Color::matrix_t *to) {
+    return sColorMatricesSf.map(from, to);
+}
+
+// static
+// Maps a Codec 2.0 transfer function to the SDK value. SDK-defined values map
+// directly; vendor extensions pass through; anything else becomes 'Other'
+// (wrapped via ColorUtils) and the function returns false.
+bool C2Mapper::map(C2Color::transfer_t from, int32_t *to) {
+    bool res = true;
+    // map SDK defined values directly. For other values, use wrapping from ColorUtils.
+    if (!sColorTransfers.map(from, to)) {
+        ColorAspects::Transfer sfTransfer;
+
+        // map known constants and keep vendor extensions. all other values are mapped to 'Other'
+        if (!sColorTransfersSf.map(from, &sfTransfer)) {
+            if (from < C2Color::TRANSFER_VENDOR_START || from > C2Color::TRANSFER_OTHER) {
+                sfTransfer = ColorAspects::TransferOther;
+                res = false;
+            } else {
+                // use static cast for values in the vendor extension range
+                // (fixed: sfTransfer was previously left uninitialized on this
+                // path; mirrors the casts done by the primaries/matrix maps)
+                sfTransfer = (ColorAspects::Transfer)from;
+            }
+        }
+
+        *to = ColorUtils::wrapColorAspectsIntoColorTransfer(sfTransfer);
+    }
+    return res;
+}
+
+// static
+// Maps an SDK transfer value to Codec 2.0. SDK-defined values map directly;
+// unwrapped vendor extensions pass through via cast; anything else becomes
+// TRANSFER_OTHER and the function returns false.
+bool C2Mapper::map(int32_t from, C2Color::transfer_t *to) {
+    // map SDK defined values directly. For other values, use wrapping from ColorUtils.
+    if (!sColorTransfers.map(from, to)) {
+        ColorAspects::Transfer sfTransfer;
+        (void)ColorUtils::unwrapColorAspectsFromColorTransfer(from, &sfTransfer);
+
+        // map known constants and keep vendor extensions. all other values are mapped to 'Other'
+        if (!sColorTransfersSf.map(sfTransfer, to)) {
+            // use static cast and ensure it is in the extension range
+            *to = (C2Color::transfer_t)sfTransfer;
+            if (*to < C2Color::TRANSFER_VENDOR_START || *to > C2Color::TRANSFER_OTHER) {
+                *to = C2Color::TRANSFER_OTHER;
+                return false;
+            }
+        }
+    }
+
+    return true;
+}
+
+// static
+// Combines the four Codec 2.0 color aspects into a single HAL dataspace
+// value. Unknown aspects fall back to 'Unspecified'. Always returns true.
+bool C2Mapper::map(
+        C2Color::range_t range, C2Color::primaries_t primaries,
+        C2Color::matrix_t matrix, C2Color::transfer_t transfer, uint32_t *dataSpace) {
+#if 0
+    // pure reimplementation -- disabled reference code that builds the
+    // dataspace bitfield directly from HAL_DATASPACE_* flags
+    *dataSpace = HAL_DATASPACE_UNKNOWN; // this is 0
+
+    switch (range) {
+        case C2Color::RANGE_FULL: *dataSpace |= HAL_DATASPACE_RANGE_FULL; break;
+        case C2Color::RANGE_LIMITED: *dataSpace |= HAL_DATASPACE_RANGE_LIMITED; break;
+        default: break;
+    }
+
+    switch (transfer) {
+        case C2Color::TRANSFER_LINEAR: *dataSpace |= HAL_DATASPACE_TRANSFER_LINEAR; break;
+        case C2Color::TRANSFER_SRGB: *dataSpace |= HAL_DATASPACE_TRANSFER_SRGB; break;
+        case C2Color::TRANSFER_170M: *dataSpace |= HAL_DATASPACE_TRANSFER_SMPTE_170M; break;
+        case C2Color::TRANSFER_GAMMA22: *dataSpace |= HAL_DATASPACE_TRANSFER_GAMMA2_2; break;
+        case C2Color::TRANSFER_GAMMA28: *dataSpace |= HAL_DATASPACE_TRANSFER_GAMMA2_8; break;
+        case C2Color::TRANSFER_ST2084: *dataSpace |= HAL_DATASPACE_TRANSFER_ST2084; break;
+        case C2Color::TRANSFER_HLG: *dataSpace |= HAL_DATASPACE_TRANSFER_HLG; break;
+        default: break;
+    }
+
+    switch (primaries) {
+        case C2Color::PRIMARIES_BT601_525:
+            *dataSpace |= (matrix == C2Color::MATRIX_SMPTE240M
+                    || matrix == C2Color::MATRIX_BT709)
+                    ? HAL_DATASPACE_STANDARD_BT601_525_UNADJUSTED
+                    : HAL_DATASPACE_STANDARD_BT601_525;
+            break;
+        case C2Color::PRIMARIES_BT601_625:
+            *dataSpace |= (matrix == C2Color::MATRIX_SMPTE240M
+                    || matrix == C2Color::MATRIX_BT709)
+                    ? HAL_DATASPACE_STANDARD_BT601_625_UNADJUSTED
+                    : HAL_DATASPACE_STANDARD_BT601_625;
+            break;
+        case C2Color::PRIMARIES_BT2020:
+            *dataSpace |= (matrix == C2Color::MATRIX_BT2020CONSTANT
+                    ? HAL_DATASPACE_STANDARD_BT2020_CONSTANT_LUMINANCE
+                    : HAL_DATASPACE_STANDARD_BT2020);
+            break;
+        case C2Color::PRIMARIES_BT470_M:
+            *dataSpace |= HAL_DATASPACE_STANDARD_BT470M;
+            break;
+        case C2Color::PRIMARIES_BT709:
+            *dataSpace |= HAL_DATASPACE_STANDARD_BT709;
+            break;
+        default: break;
+    }
+#else
+    // for now use legacy implementation: convert each aspect to its
+    // stagefright ColorAspects equivalent (falling back to Unspecified for
+    // values not in the tables) and let ColorUtils derive the dataspace
+    ColorAspects aspects;
+    if (!sColorRangesSf.map(range, &aspects.mRange)) {
+        aspects.mRange = ColorAspects::RangeUnspecified;
+    }
+    if (!sColorPrimariesSf.map(primaries, &aspects.mPrimaries)) {
+        aspects.mPrimaries = ColorAspects::PrimariesUnspecified;
+    }
+    if (!sColorMatricesSf.map(matrix, &aspects.mMatrixCoeffs)) {
+        aspects.mMatrixCoeffs = ColorAspects::MatrixUnspecified;
+    }
+    if (!sColorTransfersSf.map(transfer, &aspects.mTransfer)) {
+        aspects.mTransfer = ColorAspects::TransferUnspecified;
+    }
+    *dataSpace = ColorUtils::getDataSpaceForColorAspects(aspects, true /* mayExpand */);
+#endif
+    return true;
+}
+
+// static
+// Direct table lookup between Codec 2.0 and stagefright ColorAspects transfer
+// functions (no vendor-extension pass-through here).
+bool C2Mapper::map(C2Color::transfer_t from, ColorAspects::Transfer *to) {
+    return sColorTransfersSf.map(from, to);
+}
+
+// static
+bool C2Mapper::map(ColorAspects::Transfer from, C2Color::transfer_t *to) {
+    return sColorTransfersSf.map(from, to);
+}
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.h b/media/codec2/sfplugin/utils/Codec2Mapper.h
new file mode 100644
index 0000000..1eeb92e
--- /dev/null
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_CODEC2_MAPPER_H_
+#define ANDROID_CODEC2_MAPPER_H_
+
+#include <C2Config.h>
+
+#include <media/stagefright/foundation/ColorUtils.h>
+
+#include <memory>
+
+namespace android {
+
+    /**
+     * Utility class to map Codec 2.0 values to android values.
+     *
+     * All map() overloads return true on success and false when the input has
+     * no mapping (outputs may still be written to an 'Other'/fallback value).
+     */
+    struct C2Mapper {
+        /**
+         * Per-media-type mapper between Codec 2.0 and SDK profile/level
+         * constants.
+         */
+        struct ProfileLevelMapper {
+            virtual bool mapProfile(C2Config::profile_t, int32_t*) = 0;
+            virtual bool mapProfile(int32_t, C2Config::profile_t*) = 0;
+            virtual bool mapLevel(C2Config::level_t, int32_t*) = 0;
+            virtual bool mapLevel(int32_t, C2Config::level_t*) = 0;
+            virtual ~ProfileLevelMapper() = default;
+        };
+
+        // Returns the mapper for |mediaType|, or nullptr if the media type is
+        // not supported.
+        static std::shared_ptr<ProfileLevelMapper>
+        GetProfileLevelMapper(std::string mediaType);
+
+        // convert between bitrate modes
+        static bool map(C2Config::bitrate_mode_t, int32_t*);
+        static bool map(int32_t, C2Config::bitrate_mode_t*);
+
+        // convert between pcm encodings
+        static bool map(C2Config::pcm_encoding_t, int32_t*);
+        static bool map(int32_t, C2Config::pcm_encoding_t*);
+
+        // convert between picture types
+        static bool map(C2Config::picture_type_t, int32_t*);
+        static bool map(int32_t, C2Config::picture_type_t*);
+
+        // convert between color aspects and SDK values
+        static bool map(C2Color::range_t, int32_t*);
+        static bool map(int32_t, C2Color::range_t*);
+        static bool map(C2Color::primaries_t, C2Color::matrix_t, int32_t*);
+        static bool map(int32_t, C2Color::primaries_t*, C2Color::matrix_t*);
+        static bool map(C2Color::transfer_t, int32_t*);
+        static bool map(int32_t, C2Color::transfer_t*);
+
+        // combine color aspects into a HAL dataspace value
+        static bool map(
+                C2Color::range_t, C2Color::primaries_t, C2Color::matrix_t, C2Color::transfer_t,
+                uint32_t *dataSpace);
+
+        // convert between Codec 2.0 color aspects and stagefright ColorAspects
+        static bool map(C2Color::range_t, ColorAspects::Range*);
+        static bool map(ColorAspects::Range, C2Color::range_t*);
+        static bool map(C2Color::primaries_t, ColorAspects::Primaries*);
+        static bool map(ColorAspects::Primaries, C2Color::primaries_t*);
+        static bool map(C2Color::matrix_t, ColorAspects::MatrixCoeffs*);
+        static bool map(ColorAspects::MatrixCoeffs, C2Color::matrix_t*);
+        static bool map(C2Color::transfer_t, ColorAspects::Transfer*);
+        static bool map(ColorAspects::Transfer, C2Color::transfer_t*);
+    };
+}  // namespace android
+
+#endif // ANDROID_CODEC2_MAPPER_H_