/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
#define LOG_TAG "C2AllocatorIon"
#include <utils/Log.h>

#include <list>

#include <ion/ion.h>
#include <sys/mman.h>
#include <unistd.h> // getpagesize, size_t, close, dup

#include <C2AllocatorIon.h>
#include <C2Buffer.h>
#include <C2Debug.h>
#include <C2ErrnoUtils.h>
#include <C2HandleIonInternal.h>

#include <android-base/properties.h>

namespace android {

namespace {
    constexpr size_t USAGE_LRU_CACHE_SIZE = 1024;

    // max padding after ion/dmabuf allocations in bytes
    constexpr uint32_t MAX_PADDING = 0x8000; // 32KB
}

/* size_t <=> int(lo), int(hi) conversions */
constexpr inline int size2intLo(size_t s) {
    return int(s & 0xFFFFFFFF);
}

constexpr inline int size2intHi(size_t s) {
    // cast to uint64_t as size_t may be 32 bits wide
    return int((uint64_t(s) >> 32) & 0xFFFFFFFF);
}

constexpr inline size_t ints2size(int intLo, int intHi) {
    // convert in 2 stages to 64 bits as intHi may be negative
    return size_t(unsigned(intLo)) | size_t(uint64_t(unsigned(intHi)) << 32);
}
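// For example, s = 0x123456789 splits into lo = 0x23456789 and hi = 0x1, and
// ints2size(0x23456789, 0x1) reassembles 0x123456789. Going through unsigned
// first avoids sign-extension when a negative intLo/intHi is widened back to
// 64 bits.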

/* ========================================= ION HANDLE ======================================== */
/**
 * ION handle
 *
 * There can be only one ion client per process; this is captured in the ion fd that is passed
 * to the constructor, but it should be managed by the ion buffer allocator/mapper.
 *
 * ion uses ion_user_handle_t for buffers. We don't store this in the native handle as
 * it requires an ion_free to decref. Instead, we share the buffer to get an fd that also holds
 * a refcount.
 *
 * This handle will not capture mapped fds, as updating those would require a global mutex.
 */
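// Handle layout, as used below: a single fd (the shared buffer fd) plus ints
// carrying the allocation size (split via size2intLo/size2intHi) and a magic
// tag that IsValid() checks when importing a prior allocation.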

const C2Handle C2HandleIon::cHeader = {
    C2HandleIon::version,
    C2HandleIon::numFds,
    C2HandleIon::numInts,
    {}
};

// static
bool C2HandleIon::IsValid(const C2Handle * const o) {
    if (!o || memcmp(o, &cHeader, sizeof(cHeader))) {
        return false;
    }
    const C2HandleIon *other = static_cast<const C2HandleIon*>(o);
    return other->mInts.mMagic == kMagic;
}

// TODO: is the dup of an ion fd identical to ion_share?

/* ======================================= ION ALLOCATION ====================================== */
class C2AllocationIon : public C2LinearAllocation {
public:
    /* Interface methods */
    virtual c2_status_t map(
            size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence,
            void **addr /* nonnull */) override;
    virtual c2_status_t unmap(void *addr, size_t size, C2Fence *fenceFd) override;
    virtual ~C2AllocationIon() override;
    virtual const C2Handle *handle() const override;
    virtual id_t getAllocatorId() const override;
    virtual bool equals(const std::shared_ptr<C2LinearAllocation> &other) const override;

    // internal methods
    C2AllocationIon(int ionFd, size_t size, size_t align, unsigned heapMask, unsigned flags, C2Allocator::id_t id);
    C2AllocationIon(int ionFd, size_t size, int shareFd, C2Allocator::id_t id);

    c2_status_t status() const;

protected:
    class Impl;
    class ImplV2;
    Impl *mImpl;

    // TODO: we could make this encapsulate shared_ptr and be copyable
    C2_DO_NOT_COPY(C2AllocationIon);
};

class C2AllocationIon::Impl {
protected:
    /**
     * Constructs an ion allocation.
     *
     * \note We always create an ion allocation, even if the allocation or import fails
     * so that we can capture the error.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param capacity  size of allocation
     * \param bufferFd  buffer handle (ownership transferred to created object). Must be
     *                  invalid if err is not 0.
     * \param buffer    ion buffer user handle (ownership transferred to created object). Must be
     *                  invalid if err is not 0.
     * \param id        allocator id for this allocation
     * \param err       errno during buffer allocation or import
     */
    Impl(int ionFd, size_t capacity, int bufferFd, ion_user_handle_t buffer, C2Allocator::id_t id, int err)
        : mIonFd(ionFd),
          mHandle(bufferFd, capacity),
          mBuffer(buffer),
          mId(id),
          mInit(c2_map_errno<ENOMEM, EACCES, EINVAL>(err)),
          mMapFd(-1) {
        if (mInit != C2_OK) {
            // close ionFd now on error
            if (mIonFd >= 0) {
                close(mIonFd);
                mIonFd = -1;
            }
            // C2_CHECK(bufferFd < 0);
            // C2_CHECK(buffer < 0);
        }
    }

public:
    /**
     * Constructs an ion allocation by importing a shared buffer fd.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param capacity  size of allocation
     * \param bufferFd  buffer handle (ownership transferred to created object)
     * \param id        allocator id for this allocation
     *
     * \return created ion allocation (implementation) which may be invalid if the
     * import failed.
     */
    static Impl *Import(int ionFd, size_t capacity, int bufferFd, C2Allocator::id_t id);

    /**
     * Constructs an ion allocation by allocating an ion buffer.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param size      size of allocation
     * \param align     desired alignment of allocation
     * \param heapMask  mask of heaps considered
     * \param flags     ion allocation flags
     * \param id        allocator id for this allocation
     *
     * \return created ion allocation (implementation) which may be invalid if the
     * allocation failed.
     */
    static Impl *Alloc(int ionFd, size_t size, size_t align, unsigned heapMask, unsigned flags, C2Allocator::id_t id);
Pawin Vongmasa36653902018-11-15 00:10:25 -0800179
180 c2_status_t map(size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence, void **addr) {
181 (void)fence; // TODO: wait for fence
182 *addr = nullptr;
183 if (!mMappings.empty()) {
184 ALOGV("multiple map");
185 // TODO: technically we should return DUPLICATE here, but our block views don't
186 // actually unmap, so we end up remapping an ion buffer multiple times.
187 //
188 // return C2_DUPLICATE;
189 }
190 if (size == 0) {
191 return C2_BAD_VALUE;
192 }
193
194 int prot = PROT_NONE;
195 int flags = MAP_SHARED;
196 if (usage.expected & C2MemoryUsage::CPU_READ) {
197 prot |= PROT_READ;
198 }
199 if (usage.expected & C2MemoryUsage::CPU_WRITE) {
200 prot |= PROT_WRITE;
201 }
202
203 size_t alignmentBytes = offset % PAGE_SIZE;
204 size_t mapOffset = offset - alignmentBytes;
205 size_t mapSize = size + alignmentBytes;
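        // e.g. with 4 KiB pages, offset = 0x1234 yields alignmentBytes = 0x234,
        // mapOffset = 0x1000 and mapSize = size + 0x234: mmap offsets must be
        // page-aligned, so we map from the page boundary and hand back
        // base + alignmentBytes as the caller-visible address.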
        Mapping map = { nullptr, alignmentBytes, mapSize };

        c2_status_t err = mapInternal(mapSize, mapOffset, alignmentBytes, prot, flags, &(map.addr), addr);
        if (map.addr) {
            mMappings.push_back(map);
        }
        return err;
    }

    c2_status_t unmap(void *addr, size_t size, C2Fence *fence) {
        if (mMappings.empty()) {
            ALOGD("tried to unmap unmapped buffer");
            return C2_NOT_FOUND;
        }
        for (auto it = mMappings.begin(); it != mMappings.end(); ++it) {
            if (addr != (uint8_t *)it->addr + it->alignmentBytes ||
                    size + it->alignmentBytes != it->size) {
                continue;
            }
            int err = munmap(it->addr, it->size);
            if (err != 0) {
                ALOGD("munmap failed");
                return c2_map_errno<EINVAL>(errno);
            }
            if (fence) {
                *fence = C2Fence(); // not using fences
            }
            (void)mMappings.erase(it);
            ALOGV("successfully unmapped: addr=%p size=%zu fd=%d", addr, size, mHandle.bufferFd());
            return C2_OK;
        }
        ALOGD("unmap failed to find specified map");
        return C2_BAD_VALUE;
    }
240
Praveen Chavan86320e32019-01-22 14:47:04 -0800241 virtual ~Impl() {
Pawin Vongmasa36653902018-11-15 00:10:25 -0800242 if (!mMappings.empty()) {
243 ALOGD("Dangling mappings!");
244 for (const Mapping &map : mMappings) {
245 (void)munmap(map.addr, map.size);
246 }
247 }
248 if (mMapFd >= 0) {
249 close(mMapFd);
250 mMapFd = -1;
251 }
252 if (mInit == C2_OK) {
Praveen Chavan86320e32019-01-22 14:47:04 -0800253 if (mBuffer >= 0) {
254 (void)ion_free(mIonFd, mBuffer);
255 }
Pawin Vongmasa36653902018-11-15 00:10:25 -0800256 native_handle_close(&mHandle);
257 }
258 if (mIonFd >= 0) {
259 close(mIonFd);
260 }
261 }
262
263 c2_status_t status() const {
264 return mInit;
265 }
266
267 const C2Handle *handle() const {
268 return &mHandle;
269 }
270
271 C2Allocator::id_t getAllocatorId() const {
272 return mId;
273 }
274
Praveen Chavan86320e32019-01-22 14:47:04 -0800275 virtual ion_user_handle_t ionHandle() const {
Pawin Vongmasa36653902018-11-15 00:10:25 -0800276 return mBuffer;
277 }
278
Praveen Chavan86320e32019-01-22 14:47:04 -0800279protected:
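    // Maps the buffer via the legacy ion API: the first call goes through
    // ion_map(), which also yields a map fd that is cached in mMapFd; later
    // calls mmap() that cached fd directly. ImplV2 overrides this to mmap()
    // the shared buffer fd itself.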
    virtual c2_status_t mapInternal(size_t mapSize, size_t mapOffset, size_t alignmentBytes,
            int prot, int flags, void** base, void** addr) {
        c2_status_t err = C2_OK;
        if (mMapFd == -1) {
            int ret = ion_map(mIonFd, mBuffer, mapSize, prot,
                              flags, mapOffset, (unsigned char**)base, &mMapFd);
            ALOGV("ion_map(ionFd = %d, handle = %d, size = %zu, prot = %d, flags = %d, "
                  "offset = %zu) returned (%d)",
                  mIonFd, mBuffer, mapSize, prot, flags, mapOffset, ret);
            if (ret) {
                mMapFd = -1;
                *base = *addr = nullptr;
                err = c2_map_errno<EINVAL>(-ret);
            } else {
                *addr = (uint8_t *)*base + alignmentBytes;
            }
        } else {
            *base = mmap(nullptr, mapSize, prot, flags, mMapFd, mapOffset);
            ALOGV("mmap(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
                  "returned (%d)",
                  mapSize, prot, flags, mMapFd, mapOffset, errno);
            if (*base == MAP_FAILED) {
                *base = *addr = nullptr;
                err = c2_map_errno<EINVAL>(errno);
            } else {
                *addr = (uint8_t *)*base + alignmentBytes;
            }
        }
        return err;
    }

    int mIonFd;
    C2HandleIon mHandle;
    ion_user_handle_t mBuffer;
    C2Allocator::id_t mId;
    c2_status_t mInit;
    int mMapFd; // only one for now
    struct Mapping {
        void *addr;
        size_t alignmentBytes;
        size_t size;
    };
    std::list<Mapping> mMappings;
};

class C2AllocationIon::ImplV2 : public C2AllocationIon::Impl {
public:
    /**
     * Constructs an ion allocation for platforms with the new (ion_4.12.h) API.
     *
     * \note We always create an ion allocation, even if the allocation or import fails
     * so that we can capture the error.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param capacity  size of allocation
     * \param bufferFd  buffer handle (ownership transferred to created object). Must be
     *                  invalid if err is not 0.
     * \param id        allocator id for this allocation
     * \param err       errno during buffer allocation or import
     */
    ImplV2(int ionFd, size_t capacity, int bufferFd, C2Allocator::id_t id, int err)
        : Impl(ionFd, capacity, bufferFd, -1 /*buffer*/, id, err) {
    }

    virtual ~ImplV2() = default;

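    // The new ion API has no ion_user_handle_t, so the shared buffer fd stands
    // in for the ion handle here; equals() compares these values.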
    virtual ion_user_handle_t ionHandle() const {
        return mHandle.bufferFd();
    }

protected:
    virtual c2_status_t mapInternal(size_t mapSize, size_t mapOffset, size_t alignmentBytes,
            int prot, int flags, void** base, void** addr) {
        c2_status_t err = C2_OK;
        *base = mmap(nullptr, mapSize, prot, flags, mHandle.bufferFd(), mapOffset);
        ALOGV("mmapV2(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
              "returned (%d)",
              mapSize, prot, flags, mHandle.bufferFd(), mapOffset, errno);
        if (*base == MAP_FAILED) {
            *base = *addr = nullptr;
            err = c2_map_errno<EINVAL>(errno);
        } else {
            *addr = (uint8_t *)*base + alignmentBytes;
        }
        return err;
    }

};

C2AllocationIon::Impl *C2AllocationIon::Impl::Import(int ionFd, size_t capacity, int bufferFd,
        C2Allocator::id_t id) {
    int ret = 0;
    if (ion_is_legacy(ionFd)) {
        ion_user_handle_t buffer = -1;
        ret = ion_import(ionFd, bufferFd, &buffer);
        return new Impl(ionFd, capacity, bufferFd, buffer, id, ret);
    } else {
        return new ImplV2(ionFd, capacity, bufferFd, id, ret);
    }
}

C2AllocationIon::Impl *C2AllocationIon::Impl::Alloc(int ionFd, size_t size, size_t align,
        unsigned heapMask, unsigned flags, C2Allocator::id_t id) {
    int bufferFd = -1;
    ion_user_handle_t buffer = -1;
    // NOTE: read this value directly from the property, as this code has to run on
    // Android Q but the sysprop was only introduced in Android S.
    static size_t sPadding =
        base::GetUintProperty("media.c2.dmabuf.padding", (uint32_t)0, MAX_PADDING);
    if (sPadding > SIZE_MAX - size) {
        ALOGD("ion_alloc: size %#zx cannot accommodate padding %#zx", size, sPadding);
        // use ImplV2 as there is no allocation anyway
        return new ImplV2(ionFd, size, -1, id, -ENOMEM);
    }

    size_t allocSize = size + sPadding;
    if (align) {
        if (align - 1 > SIZE_MAX - allocSize) {
            ALOGD("ion_alloc: size %#zx cannot accommodate padding %#zx and alignment %#zx",
                  size, sPadding, align);
            // use ImplV2 as there is no allocation anyway
            return new ImplV2(ionFd, size, -1, id, -ENOMEM);
        }
        allocSize += align - 1;
        allocSize &= ~(align - 1);
    }
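    // e.g. size = 0x19100, sPadding = 0x8000 and align = 0x1000 gives
    // allocSize = 0x21100, rounded up to 0x22000; the handle created below
    // still reports the caller's original size, not allocSize.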
    int ret;

    if (ion_is_legacy(ionFd)) {
        ret = ion_alloc(ionFd, allocSize, align, heapMask, flags, &buffer);
        ALOGV("ion_alloc(ionFd = %d, size = %zu, align = %zu, heapMask = %d, flags = %d) "
              "returned (%d) ; buffer = %d",
              ionFd, allocSize, align, heapMask, flags, ret, buffer);
        if (ret == 0) {
            // get buffer fd for native handle constructor
            ret = ion_share(ionFd, buffer, &bufferFd);
            if (ret != 0) {
                ion_free(ionFd, buffer);
                buffer = -1;
            }
        }
        return new Impl(ionFd, size, bufferFd, buffer, id, ret);

    } else {
        ret = ion_alloc_fd(ionFd, allocSize, align, heapMask, flags, &bufferFd);
        ALOGV("ion_alloc_fd(ionFd = %d, size = %zu, align = %zu, heapMask = %d, flags = %d) "
              "returned (%d) ; bufferFd = %d",
              ionFd, allocSize, align, heapMask, flags, ret, bufferFd);

        return new ImplV2(ionFd, size, bufferFd, id, ret);
    }
}

c2_status_t C2AllocationIon::map(
        size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence, void **addr) {
    return mImpl->map(offset, size, usage, fence, addr);
}

c2_status_t C2AllocationIon::unmap(void *addr, size_t size, C2Fence *fence) {
    return mImpl->unmap(addr, size, fence);
}

c2_status_t C2AllocationIon::status() const {
    return mImpl->status();
}

C2Allocator::id_t C2AllocationIon::getAllocatorId() const {
    return mImpl->getAllocatorId();
}

bool C2AllocationIon::equals(const std::shared_ptr<C2LinearAllocation> &other) const {
    if (!other || other->getAllocatorId() != getAllocatorId()) {
        return false;
    }
    // get user handle to compare objects
    std::shared_ptr<C2AllocationIon> otherAsIon = std::static_pointer_cast<C2AllocationIon>(other);
    return mImpl->ionHandle() == otherAsIon->mImpl->ionHandle();
}

const C2Handle *C2AllocationIon::handle() const {
    return mImpl->handle();
}

C2AllocationIon::~C2AllocationIon() {
    delete mImpl;
}

C2AllocationIon::C2AllocationIon(int ionFd, size_t size, size_t align,
        unsigned heapMask, unsigned flags, C2Allocator::id_t id)
    : C2LinearAllocation(size),
      mImpl(Impl::Alloc(ionFd, size, align, heapMask, flags, id)) { }

C2AllocationIon::C2AllocationIon(int ionFd, size_t size, int shareFd, C2Allocator::id_t id)
    : C2LinearAllocation(size),
      mImpl(Impl::Import(ionFd, size, shareFd, id)) { }

/* ======================================= ION ALLOCATOR ====================================== */
C2AllocatorIon::C2AllocatorIon(id_t id)
    : mInit(C2_OK),
      mIonFd(ion_open()) {
    if (mIonFd < 0) {
        switch (errno) {
        case ENOENT: mInit = C2_OMITTED; break;
        default: mInit = c2_map_errno<EACCES>(errno); break;
        }
    } else {
        C2MemoryUsage minUsage = { 0, 0 };
        C2MemoryUsage maxUsage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
        Traits traits = { "android.allocator.ion", id, LINEAR, minUsage, maxUsage };
        mTraits = std::make_shared<Traits>(traits);
        mBlockSize = ::getpagesize();
    }
}
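// Typical use (illustrative sketch only; kCapacity is a placeholder and error
// handling is elided):
//
//   C2AllocatorIon allocator(C2PlatformAllocatorStore::ION);
//   C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
//   std::shared_ptr<C2LinearAllocation> alloc;
//   if (allocator.newLinearAllocation(kCapacity, usage, &alloc) == C2_OK) {
//       void *addr = nullptr;
//       alloc->map(0 /* offset */, kCapacity, usage, nullptr /* fence */, &addr);
//       // ... read/write through addr ...
//       alloc->unmap(addr, kCapacity, nullptr);
//   }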

C2AllocatorIon::~C2AllocatorIon() {
    if (mInit == C2_OK) {
        ion_close(mIonFd);
    }
}

C2Allocator::id_t C2AllocatorIon::getId() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits->id;
}

C2String C2AllocatorIon::getName() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits->name;
}

std::shared_ptr<const C2Allocator::Traits> C2AllocatorIon::getTraits() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits;
}

void C2AllocatorIon::setUsageMapper(
        const UsageMapperFn &mapper, uint64_t minUsage, uint64_t maxUsage, uint64_t blockSize) {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    mUsageMapperCache.clear();
    mUsageMapperLru.clear();
    mUsageMapper = mapper;
    Traits traits = {
        mTraits->name, mTraits->id, LINEAR,
        C2MemoryUsage(minUsage), C2MemoryUsage(maxUsage)
    };
    mTraits = std::make_shared<Traits>(traits);
    mBlockSize = blockSize;
}

std::size_t C2AllocatorIon::MapperKeyHash::operator()(const MapperKey &k) const {
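    // Simple XOR combine of the two field hashes; collisions are resolved by
    // key equality in the unordered_map, so this only affects bucket spread.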
    return std::hash<uint64_t>{}(k.first) ^ std::hash<size_t>{}(k.second);
}

c2_status_t C2AllocatorIon::mapUsage(
        C2MemoryUsage usage, size_t capacity, size_t *align, unsigned *heapMask, unsigned *flags) {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    c2_status_t res = C2_OK;
    // align capacity
    capacity = (capacity + mBlockSize - 1) & ~(mBlockSize - 1);
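    // e.g. with a 0x1000-byte block size, capacity 0x1234 rounds up to 0x2000
    // (this rounding requires mBlockSize to be a power of two).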
    MapperKey key = std::make_pair(usage.expected, capacity);
    auto entry = mUsageMapperCache.find(key);
    if (entry == mUsageMapperCache.end()) {
        if (mUsageMapper) {
            res = mUsageMapper(usage, capacity, align, heapMask, flags);
        } else {
            *align = 0; // TODO make this 1
            *heapMask = ~0; // default mask
            if (usage.expected & (C2MemoryUsage::CPU_READ | C2MemoryUsage::CPU_WRITE)) {
                *flags = ION_FLAG_CACHED; // cache CPU accessed buffers
            } else {
                *flags = 0; // default flags
            }
            res = C2_NO_INIT;
        }
        // add usage to cache
        MapperValue value = std::make_tuple(*align, *heapMask, *flags, res);
        mUsageMapperLru.emplace_front(key, value);
        mUsageMapperCache.emplace(std::make_pair(key, mUsageMapperLru.begin()));
        if (mUsageMapperCache.size() > USAGE_LRU_CACHE_SIZE) {
            // remove LRU entry; new and recently used entries are kept at the
            // front, so the least recently used entry is at the back
            MapperKey lruKey = mUsageMapperLru.back().first;
            mUsageMapperCache.erase(lruKey);
            mUsageMapperLru.pop_back();
        }
    } else {
        // move entry to MRU
        mUsageMapperLru.splice(mUsageMapperLru.begin(), mUsageMapperLru, entry->second);
        const MapperValue &value = entry->second->second;
        std::tie(*align, *heapMask, *flags, res) = value;
    }
    return res;
}

c2_status_t C2AllocatorIon::newLinearAllocation(
        uint32_t capacity, C2MemoryUsage usage, std::shared_ptr<C2LinearAllocation> *allocation) {
    if (allocation == nullptr) {
        return C2_BAD_VALUE;
    }

    allocation->reset();
    if (mInit != C2_OK) {
        return mInit;
    }

    size_t align = 0;
    unsigned heapMask = ~0;
    unsigned flags = 0;
    c2_status_t ret = mapUsage(usage, capacity, &align, &heapMask, &flags);
    if (ret && ret != C2_NO_INIT) {
        return ret;
    }

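    // dup() the allocator's ion fd here: the allocation takes ownership of the
    // fd it is handed (see the Impl constructor docs above), while the
    // allocator keeps its own fd for later allocations.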
    std::shared_ptr<C2AllocationIon> alloc
        = std::make_shared<C2AllocationIon>(dup(mIonFd), capacity, align, heapMask, flags, getId());
    ret = alloc->status();
    if (ret == C2_OK) {
        *allocation = alloc;
    }
    return ret;
}

c2_status_t C2AllocatorIon::priorLinearAllocation(
        const C2Handle *handle, std::shared_ptr<C2LinearAllocation> *allocation) {
    *allocation = nullptr;
    if (mInit != C2_OK) {
        return mInit;
    }

    if (!C2HandleIon::IsValid(handle)) {
        return C2_BAD_VALUE;
    }

    // TODO: get capacity and validate it
    const C2HandleIon *h = static_cast<const C2HandleIon*>(handle);
    std::shared_ptr<C2AllocationIon> alloc
        = std::make_shared<C2AllocationIon>(dup(mIonFd), h->size(), h->bufferFd(), getId());
    c2_status_t ret = alloc->status();
    if (ret == C2_OK) {
        *allocation = alloc;
        native_handle_delete(const_cast<native_handle_t*>(
                reinterpret_cast<const native_handle_t*>(handle)));
    }
    return ret;
}

bool C2AllocatorIon::CheckHandle(const C2Handle* const o) {
    return C2HandleIon::IsValid(o);
}

} // namespace android