blob: 59e82e230fbc140eb0db10f5882d9be536c9c4f7 [file] [log] [blame]
John Stultz6a407882020-07-11 04:34:19 +00001/*
2 * Copyright (C) 2020 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17//#define LOG_NDEBUG 0
18#define LOG_TAG "C2DmaBufAllocator"
19#include <BufferAllocator/BufferAllocator.h>
20#include <C2Buffer.h>
21#include <C2Debug.h>
22#include <C2DmaBufAllocator.h>
23#include <C2ErrnoUtils.h>
24#include <linux/ion.h>
25#include <sys/mman.h>
26#include <unistd.h> // getpagesize, size_t, close, dup
27#include <utils/Log.h>
28
29#include <list>
30
31#ifdef __ANDROID_APEX__
32#include <android-base/properties.h>
33#endif
34
35namespace android {
36
namespace {
// Maximum number of (usage, capacity) -> (heap, flags) results kept in the
// usage-mapper LRU cache (see C2DmaBufAllocator::mapUsage()).
constexpr size_t USAGE_LRU_CACHE_SIZE = 1024;
}  // namespace
40
41/* =========================== BUFFER HANDLE =========================== */
42/**
43 * Buffer handle
44 *
45 * Stores dmabuf fd & metadata
46 *
47 * This handle will not capture mapped fd-s as updating that would require a
48 * global mutex.
49 */
50
struct C2HandleBuf : public C2Handle {
    // Packs the dmabuf fd plus the 64-bit size (split into two 32-bit ints)
    // into the flat native-handle layout: 1 fd followed by 3 ints.
    C2HandleBuf(int bufferFd, size_t size)
        : C2Handle(cHeader),
          mFds{bufferFd},
          mInts{int(size & 0xFFFFFFFF), int((uint64_t(size) >> 32) & 0xFFFFFFFF), kMagic} {}

    // Returns true iff |o| byte-matches this type's header (version, fd
    // count, int count) and carries the kMagic marker in its last int.
    static bool IsValid(const C2Handle* const o);

    // The wrapped dmabuf file descriptor.
    int bufferFd() const { return mFds.mBuffer; }
    // Reassembles the 64-bit allocation size from its two 32-bit halves.
    size_t size() const {
        return size_t(unsigned(mInts.mSizeLo)) | size_t(uint64_t(unsigned(mInts.mSizeHi)) << 32);
    }

  protected:
    struct {
        int mBuffer;  // dmabuf fd
    } mFds;
    struct {
        int mSizeLo;  // low 32-bits of size
        int mSizeHi;  // high 32-bits of size
        int mMagic;   // kMagic marker checked by IsValid()
    } mInts;

  private:
    typedef C2HandleBuf _type;
    enum {
        kMagic = '\xc2io\x00',
        numFds = sizeof(mFds) / sizeof(int),
        numInts = sizeof(mInts) / sizeof(int),
        version = sizeof(C2Handle)
    };
    // constexpr static C2Handle cHeader = { version, numFds, numInts, {} };
    const static C2Handle cHeader;
};

const C2Handle C2HandleBuf::cHeader = {
        C2HandleBuf::version, C2HandleBuf::numFds, C2HandleBuf::numInts, {}};
88
89// static
90bool C2HandleBuf::IsValid(const C2Handle* const o) {
91 if (!o || memcmp(o, &cHeader, sizeof(cHeader))) {
92 return false;
93 }
94 const C2HandleBuf* other = static_cast<const C2HandleBuf*>(o);
95 return other->mInts.mMagic == kMagic;
96}
97
98/* =========================== DMABUF ALLOCATION =========================== */
class C2DmaBufAllocation : public C2LinearAllocation {
  public:
    /* Interface methods */
    // Maps [offset, offset + size) of the dmabuf into the caller's address
    // space; |*addr| receives the user-visible pointer on success.
    virtual c2_status_t map(size_t offset, size_t size, C2MemoryUsage usage, C2Fence* fence,
                            void** addr /* nonnull */) override;
    // Releases a mapping previously returned by map().
    virtual c2_status_t unmap(void* addr, size_t size, C2Fence* fenceFd) override;
    virtual ~C2DmaBufAllocation() override;
    virtual const C2Handle* handle() const override;
    virtual id_t getAllocatorId() const override;
    virtual bool equals(const std::shared_ptr<C2LinearAllocation>& other) const override;

    // internal methods

    // Allocates a new dmabuf of |size| bytes from heap |heap_name| via |alloc|.
    C2DmaBufAllocation(BufferAllocator& alloc, size_t size, C2String heap_name, unsigned flags,
                       C2Allocator::id_t id);
    // Wraps an existing dmabuf fd in an allocation object.
    C2DmaBufAllocation(size_t size, int shareFd, C2Allocator::id_t id);

    // C2_OK if construction succeeded, otherwise the mapped allocation error.
    c2_status_t status() const;

  protected:
    // mmap()s |mapSize| bytes of the dmabuf at page-aligned |mapOffset|.
    // |*base| receives the raw mapping; |*addr| points |alignmentBytes| into
    // it (the caller-requested, possibly unaligned, offset).
    virtual c2_status_t mapInternal(size_t mapSize, size_t mapOffset, size_t alignmentBytes,
                                    int prot, int flags, void** base, void** addr) {
        c2_status_t err = C2_OK;
        *base = mmap(nullptr, mapSize, prot, flags, mHandle.bufferFd(), mapOffset);
        // NOTE: errno in this log line is only meaningful when mmap() failed.
        ALOGV("mmap(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
              "returned (%d)",
              mapSize, prot, flags, mHandle.bufferFd(), mapOffset, errno);
        if (*base == MAP_FAILED) {
            *base = *addr = nullptr;
            err = c2_map_errno<EINVAL>(errno);
        } else {
            *addr = (uint8_t*)*base + alignmentBytes;
        }
        return err;
    }

    C2Allocator::id_t mId;  // id of the allocator that produced this allocation
    C2HandleBuf mHandle;    // dmabuf fd + size, exportable as a C2Handle
    c2_status_t mInit;      // construction status (see status())
    // One record per live mmap() created by map(). NOTE: mMappings is not
    // guarded by any lock here; see the class comment about mapped fds.
    struct Mapping {
        void* addr;             // base address returned by mmap()
        size_t alignmentBytes;  // user pointer offset within the mapping
        size_t size;            // total mapped size (requested size + alignmentBytes)
    };
    std::list<Mapping> mMappings;

    // TODO: we could make this encapsulate shared_ptr and copiable
    C2_DO_NOT_COPY(C2DmaBufAllocation);
};
147
148c2_status_t C2DmaBufAllocation::map(size_t offset, size_t size, C2MemoryUsage usage, C2Fence* fence,
149 void** addr) {
150 (void)fence; // TODO: wait for fence
151 *addr = nullptr;
152 if (!mMappings.empty()) {
153 ALOGV("multiple map");
154 // TODO: technically we should return DUPLICATE here, but our block views
155 // don't actually unmap, so we end up remapping the buffer multiple times.
156 //
157 // return C2_DUPLICATE;
158 }
159 if (size == 0) {
160 return C2_BAD_VALUE;
161 }
162
163 int prot = PROT_NONE;
164 int flags = MAP_SHARED;
165 if (usage.expected & C2MemoryUsage::CPU_READ) {
166 prot |= PROT_READ;
167 }
168 if (usage.expected & C2MemoryUsage::CPU_WRITE) {
169 prot |= PROT_WRITE;
170 }
171
172 size_t alignmentBytes = offset % PAGE_SIZE;
173 size_t mapOffset = offset - alignmentBytes;
174 size_t mapSize = size + alignmentBytes;
175 Mapping map = {nullptr, alignmentBytes, mapSize};
176
177 c2_status_t err =
178 mapInternal(mapSize, mapOffset, alignmentBytes, prot, flags, &(map.addr), addr);
179 if (map.addr) {
180 mMappings.push_back(map);
181 }
182 return err;
183}
184
185c2_status_t C2DmaBufAllocation::unmap(void* addr, size_t size, C2Fence* fence) {
186 if (mMappings.empty()) {
187 ALOGD("tried to unmap unmapped buffer");
188 return C2_NOT_FOUND;
189 }
190 for (auto it = mMappings.begin(); it != mMappings.end(); ++it) {
191 if (addr != (uint8_t*)it->addr + it->alignmentBytes ||
192 size + it->alignmentBytes != it->size) {
193 continue;
194 }
195 int err = munmap(it->addr, it->size);
196 if (err != 0) {
197 ALOGD("munmap failed");
198 return c2_map_errno<EINVAL>(errno);
199 }
200 if (fence) {
201 *fence = C2Fence(); // not using fences
202 }
203 (void)mMappings.erase(it);
204 ALOGV("successfully unmapped: %d", mHandle.bufferFd());
205 return C2_OK;
206 }
207 ALOGD("unmap failed to find specified map");
208 return C2_BAD_VALUE;
209}
210
// Returns the construction status: C2_OK on success, otherwise the error
// recorded when the dmabuf allocation failed.
c2_status_t C2DmaBufAllocation::status() const {
    return mInit;
}
214
// Returns the id of the allocator this allocation was created by.
C2Allocator::id_t C2DmaBufAllocation::getAllocatorId() const {
    return mId;
}
218
219bool C2DmaBufAllocation::equals(const std::shared_ptr<C2LinearAllocation>& other) const {
220 if (!other || other->getAllocatorId() != getAllocatorId()) {
221 return false;
222 }
223 // get user handle to compare objects
224 std::shared_ptr<C2DmaBufAllocation> otherAsBuf =
225 std::static_pointer_cast<C2DmaBufAllocation>(other);
226 return mHandle.bufferFd() == otherAsBuf->mHandle.bufferFd();
227}
228
// Exposes the internal C2HandleBuf (dmabuf fd + size) as a generic C2Handle.
const C2Handle* C2DmaBufAllocation::handle() const {
    return &mHandle;
}
232
233C2DmaBufAllocation::~C2DmaBufAllocation() {
234 if (!mMappings.empty()) {
235 ALOGD("Dangling mappings!");
236 for (const Mapping& map : mMappings) {
237 int err = munmap(map.addr, map.size);
238 if (err) ALOGD("munmap failed");
239 }
240 }
241 if (mInit == C2_OK) {
242 native_handle_close(&mHandle);
243 }
244}
245
// Allocates a |size|-byte dmabuf from |heap_name| with |flags| via |alloc|.
// On failure mHandle holds an invalid (negative) fd and mInit records the
// mapped error; callers must check status() before using the allocation.
C2DmaBufAllocation::C2DmaBufAllocation(BufferAllocator& alloc, size_t size, C2String heap_name,
                                       unsigned flags, C2Allocator::id_t id)
    : C2LinearAllocation(size), mHandle(-1, 0) {
    int bufferFd = -1;
    int ret = 0;

    // BufferAllocator::Alloc() returns the dmabuf fd, or a negative value on
    // failure.
    bufferFd = alloc.Alloc(heap_name, size, flags);
    if (bufferFd < 0) ret = bufferFd;

    mHandle = C2HandleBuf(bufferFd, size);
    mId = id;
    // NOTE(review): |ret| is negative on failure; confirm c2_map_errno
    // expects that sign convention, otherwise failures may map to a generic
    // error instead of C2_NO_MEMORY/C2_REFUSED/C2_BAD_VALUE.
    mInit = c2_status_t(c2_map_errno<ENOMEM, EACCES, EINVAL>(ret));
}
259
// Wraps an already-allocated dmabuf fd. |shareFd| is stored as-is (it is not
// dup()ed here), so the allocation takes over responsibility for it.
C2DmaBufAllocation::C2DmaBufAllocation(size_t size, int shareFd, C2Allocator::id_t id)
    : C2LinearAllocation(size), mHandle(-1, 0) {
    mHandle = C2HandleBuf(shareFd, size);
    mId = id;
    // c2_map_errno(0) == C2_OK: wrapping an existing fd cannot fail here.
    mInit = c2_status_t(c2_map_errno<ENOMEM, EACCES, EINVAL>(0));
}
266
267/* =========================== DMABUF ALLOCATOR =========================== */
268C2DmaBufAllocator::C2DmaBufAllocator(id_t id) : mInit(C2_OK) {
269 C2MemoryUsage minUsage = {0, 0};
270 C2MemoryUsage maxUsage = {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE};
271 Traits traits = {"android.allocator.dmabuf", id, LINEAR, minUsage, maxUsage};
272 mTraits = std::make_shared<Traits>(traits);
273}
274
// Returns this allocator's id. mTraits may be swapped by setUsageMapper(),
// so the read is taken under the same lock.
C2Allocator::id_t C2DmaBufAllocator::getId() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits->id;
}
279
// Returns the allocator's advertised name ("android.allocator.dmabuf"),
// read under the lock because setUsageMapper() replaces mTraits.
C2String C2DmaBufAllocator::getName() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits->name;
}
284
// Returns a snapshot of the current traits; safe to hold after release of
// the lock because mTraits is only ever replaced, never mutated in place.
std::shared_ptr<const C2Allocator::Traits> C2DmaBufAllocator::getTraits() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits;
}
289
290void C2DmaBufAllocator::setUsageMapper(const UsageMapperFn& mapper __unused, uint64_t minUsage,
291 uint64_t maxUsage, uint64_t blockSize) {
292 std::lock_guard<std::mutex> lock(mUsageMapperLock);
293 mUsageMapperCache.clear();
294 mUsageMapperLru.clear();
295 mUsageMapper = mapper;
296 Traits traits = {mTraits->name, mTraits->id, LINEAR, C2MemoryUsage(minUsage),
297 C2MemoryUsage(maxUsage)};
298 mTraits = std::make_shared<Traits>(traits);
299 mBlockSize = blockSize;
300}
301
302std::size_t C2DmaBufAllocator::MapperKeyHash::operator()(const MapperKey& k) const {
303 return std::hash<uint64_t>{}(k.first) ^ std::hash<size_t>{}(k.second);
304}
305
306c2_status_t C2DmaBufAllocator::mapUsage(C2MemoryUsage usage, size_t capacity, C2String* heap_name,
307 unsigned* flags) {
308 std::lock_guard<std::mutex> lock(mUsageMapperLock);
309 c2_status_t res = C2_OK;
310 // align capacity
311 capacity = (capacity + mBlockSize - 1) & ~(mBlockSize - 1);
312 MapperKey key = std::make_pair(usage.expected, capacity);
313 auto entry = mUsageMapperCache.find(key);
314 if (entry == mUsageMapperCache.end()) {
315 if (mUsageMapper) {
316 res = mUsageMapper(usage, capacity, heap_name, flags);
317 } else {
318 // No system-uncached yet, so disabled for now
319 if (0 && !(usage.expected & (C2MemoryUsage::CPU_READ | C2MemoryUsage::CPU_WRITE)))
320 *heap_name = "system-uncached";
321 else
322 *heap_name = "system";
323 *flags = 0;
324 res = C2_NO_INIT;
325 }
326 // add usage to cache
327 MapperValue value = std::make_tuple(*heap_name, *flags, res);
328 mUsageMapperLru.emplace_front(key, value);
329 mUsageMapperCache.emplace(std::make_pair(key, mUsageMapperLru.begin()));
330 if (mUsageMapperCache.size() > USAGE_LRU_CACHE_SIZE) {
331 // remove LRU entry
332 MapperKey lruKey = mUsageMapperLru.front().first;
333 mUsageMapperCache.erase(lruKey);
334 mUsageMapperLru.pop_back();
335 }
336 } else {
337 // move entry to MRU
338 mUsageMapperLru.splice(mUsageMapperLru.begin(), mUsageMapperLru, entry->second);
339 const MapperValue& value = entry->second->second;
340 std::tie(*heap_name, *flags, res) = value;
341 }
342 return res;
343}
344
345c2_status_t C2DmaBufAllocator::newLinearAllocation(
346 uint32_t capacity, C2MemoryUsage usage, std::shared_ptr<C2LinearAllocation>* allocation) {
347 if (allocation == nullptr) {
348 return C2_BAD_VALUE;
349 }
350
351 allocation->reset();
352 if (mInit != C2_OK) {
353 return mInit;
354 }
355
356 C2String heap_name;
357 unsigned flags = 0;
358 c2_status_t ret = mapUsage(usage, capacity, &heap_name, &flags);
359 if (ret && ret != C2_NO_INIT) {
360 return ret;
361 }
362
363 std::shared_ptr<C2DmaBufAllocation> alloc = std::make_shared<C2DmaBufAllocation>(
364 mBufferAllocator, capacity, heap_name, flags, getId());
365 ret = alloc->status();
366 if (ret == C2_OK) {
367 *allocation = alloc;
368 }
369 return ret;
370}
371
// Re-creates an allocation from a previously exported handle. On success the
// new allocation wraps the handle's fd and the incoming C2Handle struct is
// deleted (native_handle_delete frees the struct without closing the fd,
// which the allocation now references).
c2_status_t C2DmaBufAllocator::priorLinearAllocation(
        const C2Handle* handle, std::shared_ptr<C2LinearAllocation>* allocation) {
    *allocation = nullptr;
    if (mInit != C2_OK) {
        return mInit;
    }

    if (!C2HandleBuf::IsValid(handle)) {
        return C2_BAD_VALUE;
    }

    // TODO: get capacity and validate it
    const C2HandleBuf* h = static_cast<const C2HandleBuf*>(handle);
    std::shared_ptr<C2DmaBufAllocation> alloc =
            std::make_shared<C2DmaBufAllocation>(h->size(), h->bufferFd(), getId());
    c2_status_t ret = alloc->status();
    if (ret == C2_OK) {
        *allocation = alloc;
        // NOTE(review): on failure the handle (and its fd) is left with the
        // caller — confirm callers clean it up in that case.
        native_handle_delete(
                const_cast<native_handle_t*>(reinterpret_cast<const native_handle_t*>(handle)));
    }
    return ret;
}
395
// static
// Returns true iff |o| is a handle produced by this allocator (C2HandleBuf).
bool C2DmaBufAllocator::CheckHandle(const C2Handle* const o) {
    return C2HandleBuf::IsValid(o);
}
400
401} // namespace android