/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stddef.h>
#include <stdarg.h>
#include <fcntl.h>
#include <unwind.h>

#include <sys/un.h>
#include <sys/select.h>
#include <sys/types.h>
#include <sys/system_properties.h>

#include "dlmalloc.h"
#include "logd.h"

// =============================================================================
// Utilities directly used by Dalvik
// =============================================================================

#define HASHTABLE_SIZE          1543
#define BACKTRACE_SIZE          32
/* flag definitions, currently sharing storage with "size" */
#define SIZE_FLAG_ZYGOTE_CHILD  (1<<31)
#define SIZE_FLAG_MASK          (SIZE_FLAG_ZYGOTE_CHILD)

#define MAX_SIZE_T              (~(size_t)0)

/*
 * In a VM process, this is set to 1 after fork()ing out of zygote.
 */
int gMallocLeakZygoteChild = 0;

// =============================================================================
// Structures
// =============================================================================

typedef struct HashEntry HashEntry;
struct HashEntry {
    size_t slot;
    HashEntry* prev;
    HashEntry* next;
    size_t numEntries;
    // fields above "size" are NOT sent to the host
    size_t size;
    size_t allocations;
    intptr_t backtrace[0];
};

typedef struct HashTable HashTable;
struct HashTable {
    size_t count;
    HashEntry* slots[HASHTABLE_SIZE];
};

static pthread_mutex_t gAllocationsMutex = PTHREAD_MUTEX_INITIALIZER;
static HashTable gHashTable;

// =============================================================================
// log functions
// =============================================================================

#define debug_log(format, ...) \
    __libc_android_log_print(ANDROID_LOG_DEBUG, "malloc_leak", (format), ##__VA_ARGS__ )

// =============================================================================
// output functions
// =============================================================================

static int hash_entry_compare(const void* arg1, const void* arg2)
{
    HashEntry* e1 = *(HashEntry**)arg1;
    HashEntry* e2 = *(HashEntry**)arg2;

    size_t nbAlloc1 = e1->allocations;
    size_t nbAlloc2 = e2->allocations;
    size_t size1 = e1->size & ~SIZE_FLAG_MASK;
    size_t size2 = e2->size & ~SIZE_FLAG_MASK;
    size_t alloc1 = nbAlloc1 * size1;
    size_t alloc2 = nbAlloc2 * size2;

    // sort in descending order by:
    // 1) total size
    // 2) number of allocations
    //
    // This is used for sorting, not determination of equality, so we don't
    // need to compare the bit flags.
    int result;
    if (alloc1 > alloc2) {
        result = -1;
    } else if (alloc1 < alloc2) {
        result = 1;
    } else {
        if (nbAlloc1 > nbAlloc2) {
            result = -1;
        } else if (nbAlloc1 < nbAlloc2) {
            result = 1;
        } else {
            result = 0;
        }
    }
    return result;
}

/*
 * Retrieve native heap information.
 *
 * "*info" is set to a buffer we allocate
 * "*overallSize" is set to the size of the "info" buffer
 * "*infoSize" is set to the size of a single entry
 * "*totalMemory" is set to the sum of all allocations we're tracking; does
 *   not include heap overhead
 * "*backtraceSize" is set to the maximum number of entries in the back trace
 */
void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
        size_t* infoSize, size_t* totalMemory, size_t* backtraceSize)
{
    // don't do anything if we have invalid arguments
    if (info == NULL || overallSize == NULL || infoSize == NULL ||
            totalMemory == NULL || backtraceSize == NULL) {
        return;
    }

    pthread_mutex_lock(&gAllocationsMutex);

    if (gHashTable.count == 0) {
        *info = NULL;
        *overallSize = 0;
        *infoSize = 0;
        *totalMemory = 0;
        *backtraceSize = 0;
        goto done;
    }

    void** list = (void**)dlmalloc(sizeof(void*) * gHashTable.count);

    // debug_log("*****\ngHashTable.count = %d\n", gHashTable.count);
    // debug_log("list = %p\n", list);

    // get the entries into an array to be sorted
    int index = 0;
    int i;
    *totalMemory = 0;   // "*totalMemory" is an output, so start the sum at zero
    for (i = 0 ; i < HASHTABLE_SIZE ; i++) {
        HashEntry* entry = gHashTable.slots[i];
        while (entry != NULL) {
            list[index] = entry;
            *totalMemory = *totalMemory +
                ((entry->size & ~SIZE_FLAG_MASK) * entry->allocations);
            index++;
            entry = entry->next;
        }
    }

    // debug_log("sorted list!\n");
    // XXX: the protocol doesn't allow variable size for the stack trace (yet)
    *infoSize = (sizeof(size_t) * 2) + (sizeof(intptr_t) * BACKTRACE_SIZE);
    *overallSize = *infoSize * gHashTable.count;
    *backtraceSize = BACKTRACE_SIZE;

    // debug_log("infoSize = 0x%x overall = 0x%x\n", *infoSize, *overallSize);
    // now allocate a byte array big enough for this
    *info = (uint8_t*)dlmalloc(*overallSize);

    // debug_log("info = %p\n", info);
    if (*info == NULL) {
        *overallSize = 0;
        goto done;
    }

    // debug_log("sorting list...\n");
    qsort((void*)list, gHashTable.count, sizeof(void*), hash_entry_compare);

    uint8_t* head = *info;
    const int count = gHashTable.count;
    for (i = 0 ; i < count ; i++) {
        HashEntry* entry = list[i];
        size_t entrySize = (sizeof(size_t) * 2) + (sizeof(intptr_t) * entry->numEntries);
        if (entrySize < *infoSize) {
            /* we're writing less than a full entry, clear out the rest */
            memset(head + entrySize, 0, *infoSize - entrySize);
        } else {
            /* make sure the amount we're copying doesn't exceed the limit */
            entrySize = *infoSize;
        }
        memcpy(head, &(entry->size), entrySize);
        head += *infoSize;
    }

    dlfree(list);

done:
    // debug_log("+++++ done!\n");
    pthread_mutex_unlock(&gAllocationsMutex);
}

void free_malloc_leak_info(uint8_t* info)
{
    dlfree(info);
}
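
/*
 * A minimal sketch of how a caller (the Dalvik runtime is the expected
 * consumer) might walk the buffer produced above; the variable names are
 * illustrative only and not part of this file. Each record is *infoSize
 * bytes: two size_t fields ("size" with its flag bits, then "allocations"),
 * followed by BACKTRACE_SIZE intptr_t frames.
 *
 *     uint8_t* info;
 *     size_t overall, infoSize, total, btSize;
 *     get_malloc_leak_info(&info, &overall, &infoSize, &total, &btSize);
 *     if (info != NULL) {
 *         size_t n;
 *         for (n = 0; n < overall / infoSize; n++) {
 *             const uint8_t* rec = info + n * infoSize;
 *             size_t size = *(const size_t*)rec & ~SIZE_FLAG_MASK;
 *             size_t allocations = *(const size_t*)(rec + sizeof(size_t));
 *             const intptr_t* frames = (const intptr_t*)(rec + 2 * sizeof(size_t));
 *             // report "allocations" outstanding blocks of "size" bytes
 *             // allocated from the call stack frames[0..btSize)
 *         }
 *         free_malloc_leak_info(info);
 *     }
 */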

struct mallinfo mallinfo()
{
    return dlmallinfo();
}

void* valloc(size_t bytes) {
    /* allocate the buffer aligned on a page boundary */
    return memalign( getpagesize(), bytes );
}


/*
 * Code guarded by MALLOC_LEAK_CHECK is only needed when malloc checking is
 * enabled. It is currently excluded from libc.so and only included in
 * libc_debug.so.
 */
#ifdef MALLOC_LEAK_CHECK
#define MALLOC_ALIGNMENT    8
#define GUARD               0x48151642

#define DEBUG               0

// =============================================================================
// Structures
// =============================================================================
typedef struct AllocationEntry AllocationEntry;
struct AllocationEntry {
    HashEntry* entry;
    uint32_t guard;
};


// =============================================================================
// Hash Table functions
// =============================================================================
static uint32_t get_hash(intptr_t* backtrace, size_t numEntries)
{
    if (backtrace == NULL) return 0;

    int hash = 0;
    size_t i;
    for (i = 0 ; i < numEntries ; i++) {
        hash = (hash * 33) + (backtrace[i] >> 2);
    }

    return hash;
}

static HashEntry* find_entry(HashTable* table, int slot,
        intptr_t* backtrace, size_t numEntries, size_t size)
{
    HashEntry* entry = table->slots[slot];
    while (entry != NULL) {
        //debug_log("backtrace: %p, entry: %p entry->backtrace: %p\n",
        //        backtrace, entry, (entry != NULL) ? entry->backtrace : NULL);
        /*
         * See if the entry matches exactly. We compare the "size" field,
         * including the flag bits.
         */
        if (entry->size == size && entry->numEntries == numEntries &&
                !memcmp(backtrace, entry->backtrace, numEntries * sizeof(intptr_t))) {
            return entry;
        }

        entry = entry->next;
    }

    return NULL;
}

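/*
 * Allocations are grouped by call site: each unique (backtrace, size) pair is
 * represented by a single HashEntry whose "allocations" field counts how many
 * outstanding blocks share it. get_hash() above folds the return addresses
 * with a simple multiply-by-33 hash (dropping the two low bits of each
 * address), and record_backtrace() below either bumps the count on an
 * existing entry or chains a new one into its hash slot.
 */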
static HashEntry* record_backtrace(intptr_t* backtrace, size_t numEntries, size_t size)
{
    size_t hash = get_hash(backtrace, numEntries);
    size_t slot = hash % HASHTABLE_SIZE;

    if (size & SIZE_FLAG_MASK) {
        debug_log("malloc_debug: allocation %zx exceeds bit width\n", size);
        abort();
    }

    if (gMallocLeakZygoteChild)
        size |= SIZE_FLAG_ZYGOTE_CHILD;

    HashEntry* entry = find_entry(&gHashTable, slot, backtrace, numEntries, size);

    if (entry != NULL) {
        entry->allocations++;
    } else {
        // create a new entry
        entry = (HashEntry*)dlmalloc(sizeof(HashEntry) + numEntries*sizeof(intptr_t));
        entry->allocations = 1;
        entry->slot = slot;
        entry->prev = NULL;
        entry->next = gHashTable.slots[slot];
        entry->numEntries = numEntries;
        entry->size = size;

        memcpy(entry->backtrace, backtrace, numEntries * sizeof(intptr_t));

        gHashTable.slots[slot] = entry;

        if (entry->next != NULL) {
            entry->next->prev = entry;
        }

        // we just added an entry, increase the entry count of the hashtable
        gHashTable.count++;
    }

    return entry;
}

static int is_valid_entry(HashEntry* entry)
{
    if (entry != NULL) {
        int i;
        for (i = 0 ; i < HASHTABLE_SIZE ; i++) {
            HashEntry* e1 = gHashTable.slots[i];

            while (e1 != NULL) {
                if (e1 == entry) {
                    return 1;
                }

                e1 = e1->next;
            }
        }
    }

    return 0;
}

static void remove_entry(HashEntry* entry)
{
    HashEntry* prev = entry->prev;
    HashEntry* next = entry->next;

    if (prev != NULL) entry->prev->next = next;
    if (next != NULL) entry->next->prev = prev;

    if (prev == NULL) {
        // we are the head of the list, so set the head to be next
        gHashTable.slots[entry->slot] = entry->next;
    }

    // we just removed an entry, decrease the entry count of the hashtable
    gHashTable.count--;
}


// =============================================================================
// stack trace functions
// =============================================================================

typedef struct
{
    size_t count;
    intptr_t* addrs;
} stack_crawl_state_t;


/* depends on how the system includes define this */
#ifdef HAVE_UNWIND_CONTEXT_STRUCT
typedef struct _Unwind_Context __unwind_context;
#else
typedef _Unwind_Context __unwind_context;
#endif

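/*
 * Note: this relies on the compiler's exception-unwinding support
 * (_Unwind_Backtrace, provided by the toolchain's unwinder). trace_function()
 * is invoked once per stack frame, and get_backtrace() returns the number of
 * frames it actually recorded, at most max_entries.
 */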
static _Unwind_Reason_Code trace_function(__unwind_context *context, void *arg)
{
    stack_crawl_state_t* state = (stack_crawl_state_t*)arg;
    if (state->count) {
        intptr_t ip = (intptr_t)_Unwind_GetIP(context);
        if (ip) {
            state->addrs[0] = ip;
            state->addrs++;
            state->count--;
            return _URC_NO_REASON;
        }
    }
    /*
     * If we run out of space to record the address or 0 has been seen, stop
     * unwinding the stack.
     */
    return _URC_END_OF_STACK;
}

static inline
int get_backtrace(intptr_t* addrs, size_t max_entries)
{
    stack_crawl_state_t state;
    state.count = max_entries;
    state.addrs = (intptr_t*)addrs;
    _Unwind_Backtrace(trace_function, (void*)&state);
    return max_entries - state.count;
}

// =============================================================================
// malloc leak function dispatcher
// =============================================================================

static void* leak_malloc(size_t bytes);
static void  leak_free(void* mem);
static void* leak_calloc(size_t n_elements, size_t elem_size);
static void* leak_realloc(void* oldMem, size_t bytes);
static void* leak_memalign(size_t alignment, size_t bytes);

static void* fill_malloc(size_t bytes);
static void  fill_free(void* mem);
static void* fill_realloc(void* oldMem, size_t bytes);
static void* fill_memalign(size_t alignment, size_t bytes);

static void* chk_malloc(size_t bytes);
static void  chk_free(void* mem);
static void* chk_calloc(size_t n_elements, size_t elem_size);
static void* chk_realloc(void* oldMem, size_t bytes);
static void* chk_memalign(size_t alignment, size_t bytes);

typedef struct {
    void* (*malloc)(size_t bytes);
    void  (*free)(void* mem);
    void* (*calloc)(size_t n_elements, size_t elem_size);
    void* (*realloc)(void* oldMem, size_t bytes);
    void* (*memalign)(size_t alignment, size_t bytes);
} MallocDebug;

static const MallocDebug gMallocEngineTable[] __attribute__((aligned(32))) =
{
    { dlmalloc,    dlfree,    dlcalloc,   dlrealloc,    dlmemalign },
    { leak_malloc, leak_free, leak_calloc, leak_realloc, leak_memalign },
    { fill_malloc, fill_free, dlcalloc,   fill_realloc, fill_memalign },
    { chk_malloc,  chk_free,  chk_calloc, chk_realloc,  chk_memalign }
};

enum {
    INDEX_NORMAL = 0,
    INDEX_LEAK_CHECK,
    INDEX_MALLOC_FILL,
    INDEX_MALLOC_CHECK,
};
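
/*
 * The public malloc/free entry points below simply forward through
 * gMallocDispatch, which malloc_debug_init() points at one of the four rows
 * of gMallocEngineTable according to the "libc.debug.malloc" system property.
 */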

static MallocDebug const * gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
static int gMallocDebugLevel;
static int gTrapOnError = 1;

void* malloc(size_t bytes) {
    return gMallocDispatch->malloc(bytes);
}
void free(void* mem) {
    gMallocDispatch->free(mem);
}
void* calloc(size_t n_elements, size_t elem_size) {
    return gMallocDispatch->calloc(n_elements, elem_size);
}
void* realloc(void* oldMem, size_t bytes) {
    return gMallocDispatch->realloc(oldMem, bytes);
}
void* memalign(size_t alignment, size_t bytes) {
    return gMallocDispatch->memalign(alignment, bytes);
}

// =============================================================================
// malloc check functions
// =============================================================================

#define CHK_FILL_FREE           0xef
#define CHK_SENTINEL_VALUE      0xeb
#define CHK_SENTINEL_HEAD_SIZE  16
#define CHK_SENTINEL_TAIL_SIZE  16
#define CHK_OVERHEAD_SIZE       ( CHK_SENTINEL_HEAD_SIZE + \
                                  CHK_SENTINEL_TAIL_SIZE + \
                                  sizeof(size_t) )
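
/*
 * Layout of a block handed out by chk_malloc() (a sketch derived from the
 * code below):
 *
 *   +--------------------+------------------+--------------------+-----+
 *   | head sentinel      | user data        | tail sentinel      | ... |
 *   | 16 bytes of 0xeb   | "bytes" bytes    | 16 bytes of 0xeb   |     |
 *   +--------------------+------------------+--------------------+-----+
 *   ^ dlmalloc pointer   ^ pointer returned to the caller
 *
 * The requested size is stored in the last sizeof(size_t) bytes of the
 * dlmalloc-usable region, which is how chk_mem_check() locates the tail
 * sentinel when the block is freed or reallocated.
 */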

static void dump_stack_trace()
{
    intptr_t addrs[20];
    int c = get_backtrace(addrs, 20);
    char buf[16];
    char tmp[16*20];
    int i;

    tmp[0] = 0; // need to initialize tmp[0] for the first strlcat
    for (i=0 ; i<c; i++) {
        snprintf(buf, sizeof buf, "%2d: %08x\n", i, addrs[i]);
        strlcat(tmp, buf, sizeof tmp);
    }
    __libc_android_log_print(ANDROID_LOG_ERROR, "libc", "call stack:\n%s", tmp);
}

static int is_valid_malloc_pointer(void* addr)
{
    /* XXX: no real validation is performed yet; every pointer is accepted */
    return 1;
}

static void assert_log_message(const char* format, ...)
{
    va_list args;

    pthread_mutex_lock(&gAllocationsMutex);
    gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
    va_start(args, format);
    __libc_android_log_vprint(ANDROID_LOG_ERROR, "libc",
                              format, args);
    va_end(args);
    dump_stack_trace();
    if (gTrapOnError) {
        __builtin_trap();
    }
    gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_CHECK];
    pthread_mutex_unlock(&gAllocationsMutex);
}

static void assert_valid_malloc_pointer(void* mem)
{
    if (mem && !is_valid_malloc_pointer(mem)) {
        assert_log_message(
            "*** MALLOC CHECK: buffer %p is not a valid "
            "malloc pointer (are you mixing up new/delete "
            "and malloc/free?)", mem);
    }
}

/* Checks that a given address corresponds to a guarded block and returns
 * its original allocation size in '*allocated'.
 * 'func' is the capitalized name of the caller function.
 * Returns 0 on success, or -1 on failure.
 * NOTE: Does not return if gTrapOnError is set.
 */
static int chk_mem_check(void*       mem,
                         size_t*     allocated,
                         const char* func)
{
    char*  buffer;
    size_t offset, bytes;
    int    i;
    char*  buf;

    /* first check the bytes in the sentinel header */
    buf = (char*)mem - CHK_SENTINEL_HEAD_SIZE;
    for (i=0 ; i<CHK_SENTINEL_HEAD_SIZE ; i++) {
        if (buf[i] != CHK_SENTINEL_VALUE) {
            assert_log_message(
                "*** %s CHECK: buffer %p "
                "corrupted %d bytes before allocation",
                func, mem, CHK_SENTINEL_HEAD_SIZE-i);
            return -1;
        }
    }

    /* then the ones in the sentinel trailer */
    buffer = (char*)mem - CHK_SENTINEL_HEAD_SIZE;
    offset = dlmalloc_usable_size(buffer) - sizeof(size_t);
    bytes  = *(size_t *)(buffer + offset);

    buf = (char*)mem + bytes;
    for (i=CHK_SENTINEL_TAIL_SIZE-1 ; i>=0 ; i--) {
        if (buf[i] != CHK_SENTINEL_VALUE) {
            assert_log_message(
                "*** %s CHECK: buffer %p, size=%lu, "
                "corrupted %d bytes after allocation",
                func, buffer, bytes, i+1);
            return -1;
        }
    }

    *allocated = bytes;
    return 0;
}


void* chk_malloc(size_t bytes)
{
    char* buffer = (char*)dlmalloc(bytes + CHK_OVERHEAD_SIZE);
    if (buffer) {
        memset(buffer, CHK_SENTINEL_VALUE, bytes + CHK_OVERHEAD_SIZE);
        size_t offset = dlmalloc_usable_size(buffer) - sizeof(size_t);
        *(size_t *)(buffer + offset) = bytes;
        buffer += CHK_SENTINEL_HEAD_SIZE;
    }
    return buffer;
}

void chk_free(void* mem)
{
    assert_valid_malloc_pointer(mem);
    if (mem) {
        size_t size;
        char*  buffer;

        if (chk_mem_check(mem, &size, "FREE") == 0) {
            buffer = (char*)mem - CHK_SENTINEL_HEAD_SIZE;
            memset(buffer, CHK_FILL_FREE, size + CHK_OVERHEAD_SIZE);
            dlfree(buffer);
        }
    }
}

void* chk_calloc(size_t n_elements, size_t elem_size)
{
    size_t size;
    void*  ptr;

    /* Fail on overflow - just to be safe even though this code runs only
     * within the debugging C library, not the production one */
    if (n_elements && MAX_SIZE_T / n_elements < elem_size) {
        return NULL;
    }
    size = n_elements * elem_size;
    ptr  = chk_malloc(size);
    if (ptr != NULL) {
        memset(ptr, 0, size);
    }
    return ptr;
}

void* chk_realloc(void* mem, size_t bytes)
{
    size_t old_bytes = 0;

    assert_valid_malloc_pointer(mem);

    if (mem != NULL && chk_mem_check(mem, &old_bytes, "REALLOC") < 0)
        return NULL;

    char* new_buffer = chk_malloc(bytes);
    if (mem == NULL) {
        return new_buffer;
    }

    if (new_buffer) {
        if (bytes > old_bytes)
            bytes = old_bytes;
        memcpy(new_buffer, mem, bytes);
        chk_free(mem);
    }

    return new_buffer;
}

void* chk_memalign(size_t alignment, size_t bytes)
{
    // XXX: better to fall back to chk_malloc than to get the guard layout
    // wrong; the requested alignment is not honored here
    return chk_malloc(bytes);
}

// =============================================================================
// malloc fill functions
// =============================================================================

void* fill_malloc(size_t bytes)
{
    void* buffer = dlmalloc(bytes);
    if (buffer) {
        memset(buffer, CHK_SENTINEL_VALUE, bytes);
    }
    return buffer;
}

void fill_free(void* mem)
{
    size_t bytes = dlmalloc_usable_size(mem);
    memset(mem, CHK_FILL_FREE, bytes);
    dlfree(mem);
}

void* fill_realloc(void* mem, size_t bytes)
{
    void* buffer = fill_malloc(bytes);
    if (mem == NULL) {
        return buffer;
    }
    if (buffer) {
        size_t old_size = dlmalloc_usable_size(mem);
        size_t size = (bytes < old_size)?(bytes):(old_size);
        memcpy(buffer, mem, size);
        fill_free(mem);
    }
    return buffer;
}

void* fill_memalign(size_t alignment, size_t bytes)
{
    void* buffer = dlmemalign(alignment, bytes);
    if (buffer) {
        memset(buffer, CHK_SENTINEL_VALUE, bytes);
    }
    return buffer;
}

// =============================================================================
// malloc leak functions
// =============================================================================

#define MEMALIGN_GUARD  ((void*)0xA1A41520)

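/*
 * Layout of the blocks handed out by the leak checker (a sketch derived from
 * the code below):
 *
 *   leak_malloc():
 *     +---------------------------------------+-----------------+
 *     | AllocationEntry { HashEntry* entry;   | user data       |
 *     |                   uint32_t guard; }   |                 |
 *     +---------------------------------------+-----------------+
 *     ^ dlmalloc pointer                      ^ returned pointer
 *
 *   leak_memalign(), when extra alignment is needed:
 *     +-----------------+- ... -+----------+----------------+-----------+
 *     | AllocationEntry |  pad  | base ptr | MEMALIGN_GUARD | user data |
 *     +-----------------+- ... -+----------+----------------+-----------+
 *                       ^ base = leak_malloc() result       ^ returned (aligned)
 *
 * leak_free() recognizes the second form by the MEMALIGN_GUARD word just
 * before the pointer and follows the saved base pointer back to the header.
 */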
void* leak_malloc(size_t bytes)
{
    // allocate enough space in front of the allocation to store a pointer to
    // the alloc structure; this makes freeing the structure really fast!

    // 1. allocate enough memory and include our header
    // 2. set the base pointer to be right after our header

    void* base = dlmalloc(bytes + sizeof(AllocationEntry));
    if (base != NULL) {
        pthread_mutex_lock(&gAllocationsMutex);

        intptr_t backtrace[BACKTRACE_SIZE];
        size_t numEntries = get_backtrace(backtrace, BACKTRACE_SIZE);

        AllocationEntry* header = (AllocationEntry*)base;
        header->entry = record_backtrace(backtrace, numEntries, bytes);
        header->guard = GUARD;

        // now increment base to point to after our header.
        // this should just work since our header is 8 bytes.
        base = (AllocationEntry*)base + 1;

        pthread_mutex_unlock(&gAllocationsMutex);
    }

    return base;
}

void leak_free(void* mem)
{
    if (mem != NULL) {
        pthread_mutex_lock(&gAllocationsMutex);

        // check the guard to make sure it is valid
        AllocationEntry* header = (AllocationEntry*)mem - 1;

        if (header->guard != GUARD) {
            // could be a memaligned block
            if (((void**)mem)[-1] == MEMALIGN_GUARD) {
                mem = ((void**)mem)[-2];
                header = (AllocationEntry*)mem - 1;
            }
        }

        if (header->guard == GUARD || is_valid_entry(header->entry)) {
            // decrement the allocations
            HashEntry* entry = header->entry;
            entry->allocations--;
            if (entry->allocations <= 0) {
                remove_entry(entry);
                dlfree(entry);
            }

            // now free the memory!
            dlfree(header);
        } else {
            debug_log("WARNING: bad header guard 0x%x and invalid entry %p\n",
                header->guard, header->entry);
        }

        pthread_mutex_unlock(&gAllocationsMutex);
    }
}

void* leak_calloc(size_t n_elements, size_t elem_size)
{
    size_t size;
    void*  ptr;

    /* Fail on overflow - just to be safe even though this code runs only
     * within the debugging C library, not the production one */
    if (n_elements && MAX_SIZE_T / n_elements < elem_size) {
        return NULL;
    }
    size = n_elements * elem_size;
    ptr  = leak_malloc(size);
    if (ptr != NULL) {
        memset(ptr, 0, size);
    }
    return ptr;
}

void* leak_realloc(void* oldMem, size_t bytes)
{
    if (oldMem == NULL) {
        return leak_malloc(bytes);
    }
    void* newMem = NULL;
    AllocationEntry* header = (AllocationEntry*)oldMem - 1;
    if (header && header->guard == GUARD) {
        size_t oldSize = header->entry->size & ~SIZE_FLAG_MASK;
        newMem = leak_malloc(bytes);
        if (newMem != NULL) {
            size_t copySize = (oldSize <= bytes) ? oldSize : bytes;
            memcpy(newMem, oldMem, copySize);
            leak_free(oldMem);
        }
    } else {
        newMem = dlrealloc(oldMem, bytes);
    }
    return newMem;
}

void* leak_memalign(size_t alignment, size_t bytes)
{
    // we can just use malloc
    if (alignment <= MALLOC_ALIGNMENT)
        return leak_malloc(bytes);

    // not a power of two: round it down to the nearest one
    if (alignment & (alignment-1))
        alignment = 1L << (31 - __builtin_clz(alignment));

    // here, alignment is at least MALLOC_ALIGNMENT<<1 bytes
    // we will align by at least MALLOC_ALIGNMENT bytes
    // and at most alignment-MALLOC_ALIGNMENT bytes
    size_t size = (alignment-MALLOC_ALIGNMENT) + bytes;
    void* base = leak_malloc(size);
    if (base != NULL) {
        intptr_t ptr = (intptr_t)base;
        if ((ptr % alignment) == 0)
            return base;

        // align the pointer
        ptr += ((-ptr) % alignment);

        // there is always enough space for the base pointer and the guard
        ((void**)ptr)[-1] = MEMALIGN_GUARD;
        ((void**)ptr)[-2] = base;

        return (void*)ptr;
    }
    return base;
}
#endif /* MALLOC_LEAK_CHECK */

// called from libc_init()
extern char* __progname;

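/*
 * Debug levels recognized in the "libc.debug.malloc" system property
 * (see the switch statement below):
 *   0  - plain dlmalloc, no debugging
 *   1  - leak checker: a backtrace is recorded for every live allocation
 *   5  - fill: new allocations are filled with 0xeb, freed ones with 0xef
 *   10 - malloc check: head/tail sentinels plus fill
 */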
void malloc_debug_init()
{
    unsigned int level = 0;
#ifdef MALLOC_LEAK_CHECK
    // if MALLOC_LEAK_CHECK is enabled, use level=1 by default
    level = 1;
#endif
    char env[PROP_VALUE_MAX];
    int len = __system_property_get("libc.debug.malloc", env);

    if (len) {
        level = atoi(env);
#ifndef MALLOC_LEAK_CHECK
        /* Alert the user that libc_debug.so needs to be installed as libc.so
         * when performing malloc checks.
         */
        if (level != 0) {
            __libc_android_log_print(ANDROID_LOG_INFO, "libc",
                 "Malloc checks need libc_debug.so pushed to the device!\n");
        }
#endif
    }

#ifdef MALLOC_LEAK_CHECK
    gMallocDebugLevel = level;
    switch (level) {
    default:
    case 0:
        gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
        break;
    case 1:
        __libc_android_log_print(ANDROID_LOG_INFO, "libc",
                "%s using MALLOC_DEBUG = %d (leak checker)\n",
                __progname, level);
        gMallocDispatch = &gMallocEngineTable[INDEX_LEAK_CHECK];
        break;
    case 5:
        __libc_android_log_print(ANDROID_LOG_INFO, "libc",
                "%s using MALLOC_DEBUG = %d (fill)\n",
                __progname, level);
        gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_FILL];
        break;
    case 10:
        __libc_android_log_print(ANDROID_LOG_INFO, "libc",
                "%s using MALLOC_DEBUG = %d (sentinels, fill)\n",
                __progname, level);
        gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_CHECK];
        break;
    }
#endif
}