codec2: Add C2DmaBufAllocator backed by libdmabufheap's BufferAllocator
This is an initial step in transitioning from ION to libdmabufheap.

It adds a C2DmaBufAllocator implementation that handles allocation
from DMA-BUF heaps via libdmabufheap's BufferAllocator.

DMA-BUF heaps do not currently have an equivalent of allocating
without ION_FLAG_CACHED. To support uncached allocations, the hope
is to use a "system-uncached" heap (to be upstreamed into the
kernel, hopefully soon); the mapping code for it is included here
but disabled until that heap is available.
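
For reference, the allocation flow through libdmabufheap is expected
to look roughly like the sketch below. BufferAllocator::Alloc() is the
call the new allocator uses; the heap name, helper function and error
handling are illustrative placeholders only:

    #include <BufferAllocator/BufferAllocator.h>
    #include <unistd.h>

    // Hypothetical example: allocate one buffer from the "system"
    // DMA-BUF heap and release it again.
    int allocate_from_system_heap(size_t size) {
        BufferAllocator allocator;
        // Returns a dmabuf fd on success, a negative value on failure.
        int fd = allocator.Alloc("system", size, /* heap_flags */ 0);
        if (fd < 0) {
            return fd;
        }
        // ... mmap() or share the fd as needed ...
        close(fd);
        return 0;
    }
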
Signed-off-by: John Stultz <john.stultz@linaro.org>
Change-Id: I09ef198355140c28b50cc17b8a7acc54310e46fb
diff --git a/media/codec2/vndk/C2DmaBufAllocator.cpp b/media/codec2/vndk/C2DmaBufAllocator.cpp
new file mode 100644
index 0000000..59e82e2
--- /dev/null
+++ b/media/codec2/vndk/C2DmaBufAllocator.cpp
@@ -0,0 +1,408 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "C2DmaBufAllocator"
+#include <BufferAllocator/BufferAllocator.h>
+#include <C2Buffer.h>
+#include <C2Debug.h>
+#include <C2DmaBufAllocator.h>
+#include <C2ErrnoUtils.h>
+#include <linux/ion.h>
+#include <sys/mman.h>
+#include <unistd.h> // getpagesize, size_t, close, dup
+#include <utils/Log.h>
+
+#include <list>
+
+#ifdef __ANDROID_APEX__
+#include <android-base/properties.h>
+#endif
+
+namespace android {
+
+namespace {
+constexpr size_t USAGE_LRU_CACHE_SIZE = 1024;
+}
+
+/* =========================== BUFFER HANDLE =========================== */
+/**
+ * Buffer handle
+ *
+ * Stores dmabuf fd & metadata
+ *
+ * This handle will not capture mapped fds as updating that would require a
+ * global mutex.
+ */
+
+struct C2HandleBuf : public C2Handle {
+ C2HandleBuf(int bufferFd, size_t size)
+ : C2Handle(cHeader),
+ mFds{bufferFd},
+ mInts{int(size & 0xFFFFFFFF), int((uint64_t(size) >> 32) & 0xFFFFFFFF), kMagic} {}
+
+ static bool IsValid(const C2Handle* const o);
+
+ int bufferFd() const { return mFds.mBuffer; }
+ size_t size() const {
+ return size_t(unsigned(mInts.mSizeLo)) | size_t(uint64_t(unsigned(mInts.mSizeHi)) << 32);
+ }
+
+ protected:
+ struct {
+ int mBuffer; // dmabuf fd
+ } mFds;
+ struct {
+ int mSizeLo; // low 32-bits of size
+ int mSizeHi; // high 32-bits of size
+ int mMagic;
+ } mInts;
+
+ private:
+ typedef C2HandleBuf _type;
+ enum {
+ kMagic = '\xc2io\x00',
+ numFds = sizeof(mFds) / sizeof(int),
+ numInts = sizeof(mInts) / sizeof(int),
+ version = sizeof(C2Handle)
+ };
+ // constexpr static C2Handle cHeader = { version, numFds, numInts, {} };
+ const static C2Handle cHeader;
+};
+
+const C2Handle C2HandleBuf::cHeader = {
+ C2HandleBuf::version, C2HandleBuf::numFds, C2HandleBuf::numInts, {}};
+
+// static
+bool C2HandleBuf::IsValid(const C2Handle* const o) {
+ if (!o || memcmp(o, &cHeader, sizeof(cHeader))) {
+ return false;
+ }
+ const C2HandleBuf* other = static_cast<const C2HandleBuf*>(o);
+ return other->mInts.mMagic == kMagic;
+}
+
+/* =========================== DMABUF ALLOCATION =========================== */
+class C2DmaBufAllocation : public C2LinearAllocation {
+ public:
+ /* Interface methods */
+ virtual c2_status_t map(size_t offset, size_t size, C2MemoryUsage usage, C2Fence* fence,
+ void** addr /* nonnull */) override;
+ virtual c2_status_t unmap(void* addr, size_t size, C2Fence* fenceFd) override;
+ virtual ~C2DmaBufAllocation() override;
+ virtual const C2Handle* handle() const override;
+ virtual id_t getAllocatorId() const override;
+ virtual bool equals(const std::shared_ptr<C2LinearAllocation>& other) const override;
+
+ // internal methods
+ C2DmaBufAllocation(BufferAllocator& alloc, size_t size, C2String heap_name, unsigned flags,
+ C2Allocator::id_t id);
+ C2DmaBufAllocation(size_t size, int shareFd, C2Allocator::id_t id);
+
+ c2_status_t status() const;
+
+ protected:
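+    // Maps [mapOffset, mapOffset + mapSize) of the dmabuf; on success *base is the start
+    // of the mapping and *addr points alignmentBytes past it.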
+ virtual c2_status_t mapInternal(size_t mapSize, size_t mapOffset, size_t alignmentBytes,
+ int prot, int flags, void** base, void** addr) {
+ c2_status_t err = C2_OK;
+ *base = mmap(nullptr, mapSize, prot, flags, mHandle.bufferFd(), mapOffset);
+ ALOGV("mmap(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
+ "returned (%d)",
+ mapSize, prot, flags, mHandle.bufferFd(), mapOffset, errno);
+ if (*base == MAP_FAILED) {
+ *base = *addr = nullptr;
+ err = c2_map_errno<EINVAL>(errno);
+ } else {
+ *addr = (uint8_t*)*base + alignmentBytes;
+ }
+ return err;
+ }
+
+ C2Allocator::id_t mId;
+ C2HandleBuf mHandle;
+ c2_status_t mInit;
+ struct Mapping {
+ void* addr;
+ size_t alignmentBytes;
+ size_t size;
+ };
+ std::list<Mapping> mMappings;
+
+ // TODO: we could make this encapsulate shared_ptr and copiable
+ C2_DO_NOT_COPY(C2DmaBufAllocation);
+};
+
+c2_status_t C2DmaBufAllocation::map(size_t offset, size_t size, C2MemoryUsage usage, C2Fence* fence,
+ void** addr) {
+ (void)fence; // TODO: wait for fence
+ *addr = nullptr;
+ if (!mMappings.empty()) {
+ ALOGV("multiple map");
+ // TODO: technically we should return DUPLICATE here, but our block views
+ // don't actually unmap, so we end up remapping the buffer multiple times.
+ //
+ // return C2_DUPLICATE;
+ }
+ if (size == 0) {
+ return C2_BAD_VALUE;
+ }
+
+ int prot = PROT_NONE;
+ int flags = MAP_SHARED;
+ if (usage.expected & C2MemoryUsage::CPU_READ) {
+ prot |= PROT_READ;
+ }
+ if (usage.expected & C2MemoryUsage::CPU_WRITE) {
+ prot |= PROT_WRITE;
+ }
+
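+    // mmap() needs a page-aligned offset, so map from the start of the containing page
+    // and return a pointer advanced by the in-page remainder.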
+ size_t alignmentBytes = offset % PAGE_SIZE;
+ size_t mapOffset = offset - alignmentBytes;
+ size_t mapSize = size + alignmentBytes;
+ Mapping map = {nullptr, alignmentBytes, mapSize};
+
+ c2_status_t err =
+ mapInternal(mapSize, mapOffset, alignmentBytes, prot, flags, &(map.addr), addr);
+ if (map.addr) {
+ mMappings.push_back(map);
+ }
+ return err;
+}
+
+c2_status_t C2DmaBufAllocation::unmap(void* addr, size_t size, C2Fence* fence) {
+ if (mMappings.empty()) {
+ ALOGD("tried to unmap unmapped buffer");
+ return C2_NOT_FOUND;
+ }
+ for (auto it = mMappings.begin(); it != mMappings.end(); ++it) {
+ if (addr != (uint8_t*)it->addr + it->alignmentBytes ||
+ size + it->alignmentBytes != it->size) {
+ continue;
+ }
+ int err = munmap(it->addr, it->size);
+ if (err != 0) {
+ ALOGD("munmap failed");
+ return c2_map_errno<EINVAL>(errno);
+ }
+ if (fence) {
+ *fence = C2Fence(); // not using fences
+ }
+ (void)mMappings.erase(it);
+ ALOGV("successfully unmapped: %d", mHandle.bufferFd());
+ return C2_OK;
+ }
+ ALOGD("unmap failed to find specified map");
+ return C2_BAD_VALUE;
+}
+
+c2_status_t C2DmaBufAllocation::status() const {
+ return mInit;
+}
+
+C2Allocator::id_t C2DmaBufAllocation::getAllocatorId() const {
+ return mId;
+}
+
+bool C2DmaBufAllocation::equals(const std::shared_ptr<C2LinearAllocation>& other) const {
+ if (!other || other->getAllocatorId() != getAllocatorId()) {
+ return false;
+ }
+ // get user handle to compare objects
+ std::shared_ptr<C2DmaBufAllocation> otherAsBuf =
+ std::static_pointer_cast<C2DmaBufAllocation>(other);
+ return mHandle.bufferFd() == otherAsBuf->mHandle.bufferFd();
+}
+
+const C2Handle* C2DmaBufAllocation::handle() const {
+ return &mHandle;
+}
+
+C2DmaBufAllocation::~C2DmaBufAllocation() {
+ if (!mMappings.empty()) {
+ ALOGD("Dangling mappings!");
+ for (const Mapping& map : mMappings) {
+ int err = munmap(map.addr, map.size);
+ if (err) ALOGD("munmap failed");
+ }
+ }
+ if (mInit == C2_OK) {
+ native_handle_close(&mHandle);
+ }
+}
+
+C2DmaBufAllocation::C2DmaBufAllocation(BufferAllocator& alloc, size_t size, C2String heap_name,
+ unsigned flags, C2Allocator::id_t id)
+ : C2LinearAllocation(size), mHandle(-1, 0) {
+ int bufferFd = -1;
+ int ret = 0;
+
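+    // Alloc() returns a dmabuf fd on success or a negative value on failure.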
+ bufferFd = alloc.Alloc(heap_name, size, flags);
+ if (bufferFd < 0) ret = bufferFd;
+
+ mHandle = C2HandleBuf(bufferFd, size);
+ mId = id;
+ mInit = c2_status_t(c2_map_errno<ENOMEM, EACCES, EINVAL>(ret));
+}
+
+C2DmaBufAllocation::C2DmaBufAllocation(size_t size, int shareFd, C2Allocator::id_t id)
+ : C2LinearAllocation(size), mHandle(-1, 0) {
+ mHandle = C2HandleBuf(shareFd, size);
+ mId = id;
+ mInit = c2_status_t(c2_map_errno<ENOMEM, EACCES, EINVAL>(0));
+}
+
+/* =========================== DMABUF ALLOCATOR =========================== */
+C2DmaBufAllocator::C2DmaBufAllocator(id_t id) : mInit(C2_OK) {
+ C2MemoryUsage minUsage = {0, 0};
+ C2MemoryUsage maxUsage = {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE};
+ Traits traits = {"android.allocator.dmabuf", id, LINEAR, minUsage, maxUsage};
+ mTraits = std::make_shared<Traits>(traits);
+}
+
+C2Allocator::id_t C2DmaBufAllocator::getId() const {
+ std::lock_guard<std::mutex> lock(mUsageMapperLock);
+ return mTraits->id;
+}
+
+C2String C2DmaBufAllocator::getName() const {
+ std::lock_guard<std::mutex> lock(mUsageMapperLock);
+ return mTraits->name;
+}
+
+std::shared_ptr<const C2Allocator::Traits> C2DmaBufAllocator::getTraits() const {
+ std::lock_guard<std::mutex> lock(mUsageMapperLock);
+ return mTraits;
+}
+
+void C2DmaBufAllocator::setUsageMapper(const UsageMapperFn& mapper, uint64_t minUsage,
+ uint64_t maxUsage, uint64_t blockSize) {
+ std::lock_guard<std::mutex> lock(mUsageMapperLock);
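+    // Installing a new mapper invalidates any cached heap name/flag results.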
+ mUsageMapperCache.clear();
+ mUsageMapperLru.clear();
+ mUsageMapper = mapper;
+ Traits traits = {mTraits->name, mTraits->id, LINEAR, C2MemoryUsage(minUsage),
+ C2MemoryUsage(maxUsage)};
+ mTraits = std::make_shared<Traits>(traits);
+ mBlockSize = blockSize;
+}
+
+std::size_t C2DmaBufAllocator::MapperKeyHash::operator()(const MapperKey& k) const {
+ return std::hash<uint64_t>{}(k.first) ^ std::hash<size_t>{}(k.second);
+}
+
+c2_status_t C2DmaBufAllocator::mapUsage(C2MemoryUsage usage, size_t capacity, C2String* heap_name,
+ unsigned* flags) {
+ std::lock_guard<std::mutex> lock(mUsageMapperLock);
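+    // Heap name and flags per (usage, block-aligned capacity) are cached with LRU eviction.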
+ c2_status_t res = C2_OK;
+ // align capacity
+ capacity = (capacity + mBlockSize - 1) & ~(mBlockSize - 1);
+ MapperKey key = std::make_pair(usage.expected, capacity);
+ auto entry = mUsageMapperCache.find(key);
+ if (entry == mUsageMapperCache.end()) {
+ if (mUsageMapper) {
+ res = mUsageMapper(usage, capacity, heap_name, flags);
+ } else {
+ // No system-uncached yet, so disabled for now
+ if (0 && !(usage.expected & (C2MemoryUsage::CPU_READ | C2MemoryUsage::CPU_WRITE)))
+ *heap_name = "system-uncached";
+ else
+ *heap_name = "system";
+ *flags = 0;
+ res = C2_NO_INIT;
+ }
+ // add usage to cache
+ MapperValue value = std::make_tuple(*heap_name, *flags, res);
+ mUsageMapperLru.emplace_front(key, value);
+ mUsageMapperCache.emplace(std::make_pair(key, mUsageMapperLru.begin()));
+ if (mUsageMapperCache.size() > USAGE_LRU_CACHE_SIZE) {
+ // remove LRU entry
+            MapperKey lruKey = mUsageMapperLru.back().first;
+ mUsageMapperCache.erase(lruKey);
+ mUsageMapperLru.pop_back();
+ }
+ } else {
+ // move entry to MRU
+ mUsageMapperLru.splice(mUsageMapperLru.begin(), mUsageMapperLru, entry->second);
+ const MapperValue& value = entry->second->second;
+ std::tie(*heap_name, *flags, res) = value;
+ }
+ return res;
+}
+
+c2_status_t C2DmaBufAllocator::newLinearAllocation(
+ uint32_t capacity, C2MemoryUsage usage, std::shared_ptr<C2LinearAllocation>* allocation) {
+ if (allocation == nullptr) {
+ return C2_BAD_VALUE;
+ }
+
+ allocation->reset();
+ if (mInit != C2_OK) {
+ return mInit;
+ }
+
+ C2String heap_name;
+ unsigned flags = 0;
+ c2_status_t ret = mapUsage(usage, capacity, &heap_name, &flags);
+ if (ret && ret != C2_NO_INIT) {
+ return ret;
+ }
+
+ std::shared_ptr<C2DmaBufAllocation> alloc = std::make_shared<C2DmaBufAllocation>(
+ mBufferAllocator, capacity, heap_name, flags, getId());
+ ret = alloc->status();
+ if (ret == C2_OK) {
+ *allocation = alloc;
+ }
+ return ret;
+}
+
+c2_status_t C2DmaBufAllocator::priorLinearAllocation(
+ const C2Handle* handle, std::shared_ptr<C2LinearAllocation>* allocation) {
+ *allocation = nullptr;
+ if (mInit != C2_OK) {
+ return mInit;
+ }
+
+ if (!C2HandleBuf::IsValid(handle)) {
+ return C2_BAD_VALUE;
+ }
+
+ // TODO: get capacity and validate it
+ const C2HandleBuf* h = static_cast<const C2HandleBuf*>(handle);
+ std::shared_ptr<C2DmaBufAllocation> alloc =
+ std::make_shared<C2DmaBufAllocation>(h->size(), h->bufferFd(), getId());
+ c2_status_t ret = alloc->status();
+ if (ret == C2_OK) {
+ *allocation = alloc;
+ native_handle_delete(
+ const_cast<native_handle_t*>(reinterpret_cast<const native_handle_t*>(handle)));
+ }
+ return ret;
+}
+
+// static
+bool C2DmaBufAllocator::CheckHandle(const C2Handle* const o) {
+ return C2HandleBuf::IsValid(o);
+}
+
+} // namespace android