/* Malloc implementation for multiple threads without lock contention.
- Copyright (C) 1996, 1997 Free Software Foundation, Inc.
+ Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Wolfram Gloger <wmglo@dent.med.uni-muenchen.de>
and Doug Lea <dl@cs.oswego.edu>, 1996.
#endif
+#ifndef LACKS_UNISTD_H
+# include <unistd.h>
+#endif
+
/*
Define HAVE_MMAP to optionally make malloc() use mmap() to
allocate very large blocks. These will be returned to the
operating system immediately after a free().
*/
#ifndef HAVE_MMAP
-#define HAVE_MMAP 1
+# ifdef _POSIX_MAPPED_FILES
+# define HAVE_MMAP 1
+# endif
#endif
/*
Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
large blocks.
*/
#ifndef HAVE_MREMAP
-#define HAVE_MREMAP defined(__linux__)
+#define HAVE_MREMAP defined(__linux__) && !defined(__arm__)
#endif
#if HAVE_MMAP
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif
+#ifndef MAP_NORESERVE
+# ifdef MAP_AUTORESRV
+# define MAP_NORESERVE MAP_AUTORESRV
+# else
+# define MAP_NORESERVE 0
+# endif
+#endif
+
#endif /* HAVE_MMAP */
/*
bsd/gnu getpagesize.h
*/
-#ifndef LACKS_UNISTD_H
-# include <unistd.h>
-#endif
-
#ifndef malloc_getpagesize
# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
# ifndef _SC_PAGE_SIZE
const Void_t *caller);
static Void_t* memalign_check(size_t alignment, size_t bytes,
const Void_t *caller);
+#ifndef NO_THREADS
static Void_t* malloc_starter(size_t sz, const Void_t *caller);
static void free_starter(Void_t* mem, const Void_t *caller);
static Void_t* malloc_atfork(size_t sz, const Void_t *caller);
static void free_atfork(Void_t* mem, const Void_t *caller);
#endif
+#endif
#else
static void free_check();
static Void_t* realloc_check();
static Void_t* memalign_check();
+#ifndef NO_THREADS
static Void_t* malloc_starter();
static void free_starter();
static Void_t* malloc_atfork();
static void free_atfork();
#endif
+#endif
#endif
#endif
/* Already initialized? */
-int __malloc_initialized = 0;
+int __malloc_initialized = -1;
+#ifndef NO_THREADS
+
/* The following three functions are registered via thread_atfork() to
make sure that the mutexes remain in a consistent state in the
fork()ed version of a thread. Also adapt the malloc and free hooks
temporarily, because the `atfork' handler mechanism may use
malloc/free internally (e.g. in LinuxThreads). */
(void)mutex_unlock(&list_lock);
}
+static void
+ptmalloc_init_all __MALLOC_P((void))
+{
+ arena *ar_ptr;
+
+#if defined _LIBC || defined MALLOC_HOOKS
+ tsd_setspecific(arena_key, save_arena);
+ __malloc_hook = save_malloc_hook;
+ __free_hook = save_free_hook;
+#endif
+ for(ar_ptr = &main_arena;;) {
+ (void)mutex_init(&ar_ptr->mutex);
+ ar_ptr = ar_ptr->next;
+ if(ar_ptr == &main_arena) break;
+ }
+ (void)mutex_init(&list_lock);
+}
+
+#endif
+
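/* For reference, the same fork-safety pattern expressed with the plain
   pthreads calls that thread_atfork() wraps: a minimal, hypothetical
   sketch (the handler names and `allocator_lock' are illustrative, not
   part of this malloc). */
#if 0 /* illustration only */
#include <pthread.h>

static pthread_mutex_t allocator_lock = PTHREAD_MUTEX_INITIALIZER;

static void atfork_prepare(void) { pthread_mutex_lock(&allocator_lock); }
static void atfork_parent(void)  { pthread_mutex_unlock(&allocator_lock); }
/* The child must not unlock a mutex whose inherited state is undefined;
   it re-initializes the mutex instead, as ptmalloc_init_all() does above. */
static void atfork_child(void)   { pthread_mutex_init(&allocator_lock, NULL); }

static void install_fork_handlers(void)
{
  (void)pthread_atfork(atfork_prepare, atfork_parent, atfork_child);
}
#endif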
/* Initialization routine. */
#if defined(_LIBC)
#if 0
const char* s;
#endif
- if(__malloc_initialized) return;
- __malloc_initialized = 1;
+ if(__malloc_initialized >= 0) return;
+ __malloc_initialized = 0;
+#ifndef NO_THREADS
#if defined _LIBC || defined MALLOC_HOOKS
/* With some threads implementations, creating thread-specific data
or initializing a mutex may call malloc() itself. Provide a
simple starter version (realloc() won't work). */
__malloc_hook = malloc_starter;
__free_hook = free_starter;
#endif
-#if defined _LIBC && !defined NO_THREADS
+#ifdef _LIBC
/* Initialize the pthreads interface. */
if (__pthread_initialize != NULL)
__pthread_initialize();
#endif
-#ifndef NO_THREADS
mutex_init(&main_arena.mutex);
mutex_init(&list_lock);
tsd_key_create(&arena_key, NULL);
tsd_setspecific(arena_key, (Void_t *)&main_arena);
- thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all);
-#endif
+ thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_init_all);
+#endif /* !defined NO_THREADS */
#if defined _LIBC || defined MALLOC_HOOKS
if((s = getenv("MALLOC_TRIM_THRESHOLD_")))
mALLOPt(M_TRIM_THRESHOLD, atoi(s));
if((s = getenv("MALLOC_MMAP_MAX_")))
mALLOPt(M_MMAP_MAX, atoi(s));
s = getenv("MALLOC_CHECK_");
+#ifndef NO_THREADS
__malloc_hook = save_malloc_hook;
__free_hook = save_free_hook;
+#endif
if(s) {
if(s[0]) mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
__malloc_check_init();
if(__malloc_initialize_hook != NULL)
(*__malloc_initialize_hook)();
#endif
+ __malloc_initialized = 1;
}
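/* Usage note for the environment hooks read above: the allocator can be
   tuned without recompiling, e.g. from the shell:

     MALLOC_TRIM_THRESHOLD_=65536 MALLOC_MMAP_MAX_=0 MALLOC_CHECK_=1 ./app

   With MALLOC_CHECK_=1 the debugging hooks report invalid pointers on
   stderr; with MALLOC_CHECK_=2 they abort() instead. */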
/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
- ptmalloc_unlock_all)
+ ptmalloc_init_all)
#endif
#if defined _LIBC || defined MALLOC_HOOKS
initialization routine, then do the normal work. */
static Void_t*
-#ifdef _LIBC
-malloc_hook_ini(size_t sz, const __malloc_ptr_t caller)
-#else
#if __STD_C
-malloc_hook_ini(size_t sz)
+malloc_hook_ini(size_t sz, const __malloc_ptr_t caller)
#else
-malloc_hook_ini(sz) size_t sz;
-#endif
+malloc_hook_ini(sz, caller)
+ size_t sz; const __malloc_ptr_t caller;
#endif
{
__malloc_hook = NULL;
- __realloc_hook = NULL;
- __memalign_hook = NULL;
ptmalloc_init();
return mALLOc(sz);
}
{
__malloc_hook = NULL;
__realloc_hook = NULL;
- __memalign_hook = NULL;
ptmalloc_init();
return rEALLOc(ptr, sz);
}
size_t sz; size_t alignment; const __malloc_ptr_t caller;
#endif
{
- __malloc_hook = NULL;
- __realloc_hook = NULL;
__memalign_hook = NULL;
ptmalloc_init();
return mEMALIGn(sz, alignment);
= memalign_hook_ini;
void weak_variable (*__after_morecore_hook) __MALLOC_P ((void)) = NULL;
+/* Whether we are using malloc checking. */
+static int using_malloc_checking;
+
+/* A flag that is set by malloc_set_state, to signal that malloc checking
+ must not be enabled on the request from the user (via the MALLOC_CHECK_
+ environment variable). It is reset by __malloc_check_init to tell
+ malloc_set_state that the user has requested malloc checking.
+
+ The purpose of this flag is to make sure that malloc checking is not
+ enabled when the heap to be restored was constructed without malloc
+ checking, and thus does not contain the required magic bytes.
+ Otherwise the heap would be corrupted by calls to free and realloc. If
+ it turns out that the heap was created with malloc checking and the
+ user has requested it, malloc_set_state just calls __malloc_check_init
+ again to enable it. On the other hand, reusing such a heap without
+ further malloc checking is safe. */
+static int disallow_malloc_check;
+
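/* Scenario sketch: when malloc_set_state() triggers initialization and
   MALLOC_CHECK_ is set, __malloc_check_init() runs once, clears the
   flag and installs nothing; malloc_set_state() then sees the cleared
   flag and may re-enable checking for a heap that contains the magic
   bytes.  Without MALLOC_CHECK_ the flag stays set and checking is
   never enabled behind the user's back. */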
/* Activate a standard set of debugging hooks. */
void
__malloc_check_init()
{
+ if (disallow_malloc_check) {
+ disallow_malloc_check = 0;
+ return;
+ }
+ using_malloc_checking = 1;
__malloc_hook = malloc_check;
__free_hook = free_check;
__realloc_hook = realloc_check;
static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
-#define MMAP(size, prot) ((dev_zero_fd < 0) ? \
+#define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
(dev_zero_fd = open("/dev/zero", O_RDWR), \
- mmap(0, (size), (prot), MAP_PRIVATE, dev_zero_fd, 0)) : \
- mmap(0, (size), (prot), MAP_PRIVATE, dev_zero_fd, 0))
+ mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
+ mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))
#else
-#define MMAP(size, prot) \
- (mmap(0, (size), (prot), MAP_PRIVATE|MAP_ANONYMOUS, -1, 0))
+#define MMAP(addr, size, prot, flags) \
+ (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
#endif
*/
size = (size + SIZE_SZ + page_mask) & ~page_mask;
- p = (mchunkptr)MMAP(size, PROT_READ|PROT_WRITE);
+ p = (mchunkptr)MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE);
if(p == (mchunkptr) MAP_FAILED) return 0;
n_mmaps++;
size = HEAP_MAX_SIZE;
size = (size + page_mask) & ~page_mask;
- p1 = (char *)MMAP(HEAP_MAX_SIZE<<1, PROT_NONE);
+ /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
+ No swap space needs to be reserved for the following large
+ mapping (on Linux, this is the case for all non-writable mappings
+ anyway). */
+ p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
if(p1 == MAP_FAILED)
return 0;
p2 = (char *)(((unsigned long)p1 + HEAP_MAX_SIZE) & ~(HEAP_MAX_SIZE-1));
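/* The alignment trick above as a self-contained sketch (hypothetical
   helper, assuming MAP_ANONYMOUS is available): over-reserve twice the
   desired power-of-two alignment with an inaccessible, no-reserve
   mapping, then unmap the misaligned head and the tail.  Pages are
   committed later, e.g. via mprotect(), as this file does for heaps. */
#if 0 /* illustration only */
#include <sys/mman.h>

static char *map_aligned(unsigned long size)  /* size: power of two */
{
  char *p1, *p2;
  unsigned long ul;

  p1 = (char *)mmap(0, size << 1, PROT_NONE,
                    MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
  if (p1 == (char *)MAP_FAILED)
    return 0;
  p2 = (char *)(((unsigned long)p1 + size) & ~(size - 1));
  ul = p2 - p1;                       /* size of the misaligned head */
  if (ul)
    munmap(p1, ul);                   /* trim head */
  munmap(p2 + size, size - ul);       /* trim tail */
  return p2;
}
#endif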
new_size = (long)h->size + diff;
if(new_size < (long)sizeof(*h))
return -1;
- if(mprotect((char *)h + new_size, -diff, PROT_NONE) != 0)
+ /* Try to re-map the extra heap space freshly to save memory, and
+ make it inaccessible. */
+ if((char *)MMAP((char *)h + new_size, -diff, PROT_NONE,
+ MAP_PRIVATE|MAP_FIXED) == (char *) MAP_FAILED)
return -2;
}
h->size = new_size;
}
/* Check the global, circularly linked list for available arenas. */
+ repeat:
do {
if(!mutex_trylock(&a->mutex)) {
THREAD_STAT(++(a->stat_lock_loop));
a = a->next;
} while(a != a_tsd);
+ /* If not even the list_lock can be obtained, try again. This can
+ happen during `atfork', or for example on systems where thread
+ creation makes it temporarily impossible to obtain _any_
+ locks. */
+ if(mutex_trylock(&list_lock)) {
+ a = a_tsd;
+ goto repeat;
+ }
+ (void)mutex_unlock(&list_lock);
+
/* Nothing immediately available, so generate a new arena. */
h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT));
if(!h)
if(ar_ptr != &main_arena) {
heap_info *heap = heap_for_ptr(p);
assert(heap->ar_ptr == ar_ptr);
- assert((char *)p + sz <= (char *)heap + heap->size);
+ if(p != top(ar_ptr))
+ assert((char *)p + sz <= (char *)heap + heap->size);
+ else
+ assert((char *)p + sz == (char *)heap + heap->size);
return;
}
#endif
mallocs with other sbrk calls.
- All allocations are made from the the `lowest' part of any found
+ All allocations are made from the `lowest' part of any found
chunk. (The implementation invariant is that prev_inuse is
always true of any allocated chunk; i.e., that each allocated
chunk borders either a previously allocated and still in-use chunk,
or the base of its memory arena.)
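/* Illustration (simplified, hypothetical field layout): the invariant
   can be stated directly in terms of the boundary-tag bit kept in the
   low bit of each chunk's size field. */
#if 0 /* illustration only */
#define PREV_INUSE 0x1UL
struct chunk_hdr { unsigned long prev_size, size; };
/* For every allocated chunk c: (c->size & PREV_INUSE) != 0, i.e. an
   allocated chunk never sits directly above a free one. */
#endif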
mi->arena = ar_ptr->size;
mi->ordblks = navail;
+ mi->smblks = mi->usmblks = mi->fsmblks = 0; /* clear unused fields */
mi->uordblks = ar_ptr->size - avail;
mi->fordblks = avail;
mi->hblks = n_mmaps;
functions. */
#define MALLOC_STATE_MAGIC 0x444c4541l
-#define MALLOC_STATE_VERSION (0*0x100l + 0l) /* major*0x100 + minor */
+#define MALLOC_STATE_VERSION (0*0x100l + 1l) /* major*0x100 + minor */
struct malloc_state {
long magic;
unsigned int max_n_mmaps;
unsigned long mmapped_mem;
unsigned long max_mmapped_mem;
+ int using_malloc_checking;
};
Void_t*
mALLOC_GET_STATe()
{
- mchunkptr victim;
struct malloc_state* ms;
int i;
mbinptr b;
- ptmalloc_init();
- (void)mutex_lock(&main_arena.mutex);
- victim = chunk_alloc(&main_arena, request2size(sizeof(*ms)));
- if(!victim) {
- (void)mutex_unlock(&main_arena.mutex);
+ ms = (struct malloc_state*)mALLOc(sizeof(*ms));
+ if (!ms)
return 0;
- }
- ms = (struct malloc_state*)chunk2mem(victim);
+ (void)mutex_lock(&main_arena.mutex);
ms->magic = MALLOC_STATE_MAGIC;
ms->version = MALLOC_STATE_VERSION;
ms->av[0] = main_arena.av[0];
ms->max_n_mmaps = max_n_mmaps;
ms->mmapped_mem = mmapped_mem;
ms->max_mmapped_mem = max_mmapped_mem;
+#if defined _LIBC || defined MALLOC_HOOKS
+ ms->using_malloc_checking = using_malloc_checking;
+#else
+ ms->using_malloc_checking = 0;
+#endif
(void)mutex_unlock(&main_arena.mutex);
return (Void_t*)ms;
}
int i;
mbinptr b;
+#if defined _LIBC || defined MALLOC_HOOKS
+ disallow_malloc_check = 1;
+#endif
ptmalloc_init();
if(ms->magic != MALLOC_STATE_MAGIC) return -1;
/* Must fail if the major version is too high. */
mmapped_mem = ms->mmapped_mem;
max_mmapped_mem = ms->max_mmapped_mem;
/* add version-dependent code here */
+ if (ms->version >= 1) {
+#if defined _LIBC || defined MALLOC_HOOKS
+ /* Check whether it is safe to enable malloc checking. */
+ if (ms->using_malloc_checking && !using_malloc_checking &&
+ !disallow_malloc_check)
+ __malloc_check_init ();
+#endif
+ }
+
(void)mutex_unlock(&main_arena.mutex);
return 0;
}
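/* Usage sketch for the two functions above (exported by GNU libc as
   malloc_get_state()/malloc_set_state()): an unexec-style dumper saves
   the bookkeeping before writing out its image and reinstalls it on
   restart.  Minimal, with error handling mostly elided. */
#if 0 /* illustration only */
#include <malloc.h>
#include <stdlib.h>

void *dump_phase(void)
{
  /* The returned buffer is malloc()ed; keep it in the dumped image. */
  return malloc_get_state();
}

int restore_phase(void *ms)
{
  /* Fails with a negative value on a magic/version mismatch; may
     re-enable malloc checking if the dumped heap was built with it. */
  if (malloc_set_state(ms) < 0)
    return -1;
  free(ms);
  return 0;
}
#endif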
/* A simple, standard set of debugging hooks. Overhead is `only' one
byte per chunk; still this will catch most cases of double frees or
- overruns. */
+ overruns. The goal here is to avoid obscure crashes due to invalid
+ usage, unlike in the MALLOC_DEBUG code. */
#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
+/* Instrument a chunk with overrun detector byte(s) and convert it
+ into a user pointer with requested size sz. */
+
+static Void_t*
+#if __STD_C
+chunk2mem_check(mchunkptr p, size_t sz)
+#else
+chunk2mem_check(p, sz) mchunkptr p; size_t sz;
+#endif
+{
+ unsigned char* m_ptr = (unsigned char*)chunk2mem(p);
+ size_t i;
+
+ for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
+ i > sz;
+ i -= 0xFF) {
+ if(i-sz < 0x100) {
+ m_ptr[i] = (unsigned char)(i-sz);
+ break;
+ }
+ m_ptr[i] = 0xFF;
+ }
+ m_ptr[sz] = MAGICBYTE(p);
+ return (Void_t*)m_ptr;
+}
+
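/* Worked example of the instrumentation above: with SIZE_SZ == 4, a
   non-mmapped chunk of chunksize 32 holding a request of sz == 5 gets
   MAGICBYTE(p) at user offset 5 and the distance 27-5 == 22 in the top
   guard byte at user offset 27.  mem2chunk_check() below starts at the
   top byte, reads 22, steps down 22 to offset 5 and finds the magic
   byte.  Distances of 0x100 or more are bridged with 0xFF bytes, each
   standing for a step of 255. */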
/* Convert a pointer to be free()d or realloc()ed to a valid chunk
- pointer. If the provided pointer is not valid, return NULL. The
- goal here is to avoid crashes, unlike in the MALLOC_DEBUG code. */
+ pointer. If the provided pointer is not valid, return NULL. */
static mchunkptr
internal_function
#endif
{
mchunkptr p;
- INTERNAL_SIZE_T sz;
+ INTERNAL_SIZE_T sz, c;
+ unsigned char magic;
p = mem2chunk(mem);
if(!aligned_OK(p)) return NULL;
(long)prev_chunk(p)<(long)sbrk_base ||
next_chunk(prev_chunk(p))!=p) ))
return NULL;
- if(*((unsigned char*)p + sz + (SIZE_SZ-1)) != MAGICBYTE(p))
- return NULL;
- *((unsigned char*)p + sz + (SIZE_SZ-1)) ^= 0xFF;
+ magic = MAGICBYTE(p);
+ for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
+ if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
+ }
+ ((unsigned char*)p)[sz] ^= 0xFF;
} else {
unsigned long offset, page_mask = malloc_getpagesize-1;
( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
return NULL;
- if(*((unsigned char*)p + sz - 1) != MAGICBYTE(p))
- return NULL;
- *((unsigned char*)p + sz - 1) ^= 0xFF;
+ magic = MAGICBYTE(p);
+ for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
+ if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
+ }
+ ((unsigned char*)p)[sz] ^= 0xFF;
}
return p;
}
+/* Check for corruption of the top chunk, and try to recover if
+ necessary. */
+
+static int
+#if __STD_C
+top_check(void)
+#else
+top_check()
+#endif
+{
+ mchunkptr t = top(&main_arena);
+ char* brk, * new_brk;
+ INTERNAL_SIZE_T front_misalign, sbrk_size;
+ unsigned long pagesz = malloc_getpagesize;
+
+ if((char*)t + chunksize(t) == sbrk_base + sbrked_mem ||
+ t == initial_top(&main_arena)) return 0;
+
+ switch(check_action) {
+ case 1:
+ fprintf(stderr, "malloc: top chunk is corrupt\n");
+ break;
+ case 2:
+ abort();
+ }
+ /* Try to set up a new top chunk. */
+ brk = MORECORE(0);
+ front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
+ if (front_misalign > 0)
+ front_misalign = MALLOC_ALIGNMENT - front_misalign;
+ sbrk_size = front_misalign + top_pad + MINSIZE;
+ sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
+ new_brk = (char*)(MORECORE (sbrk_size));
+ if (new_brk == (char*)(MORECORE_FAILURE)) return -1;
+ sbrked_mem = (new_brk - sbrk_base) + sbrk_size;
+
+ top(&main_arena) = (mchunkptr)(brk + front_misalign);
+ set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);
+
+ return 0;
+}
+
static Void_t*
#if __STD_C
malloc_check(size_t sz, const Void_t *caller)
INTERNAL_SIZE_T nb = request2size(sz + 1);
(void)mutex_lock(&main_arena.mutex);
- victim = chunk_alloc(&main_arena, nb);
+ victim = (top_check() >= 0) ? chunk_alloc(&main_arena, nb) : NULL;
(void)mutex_unlock(&main_arena.mutex);
if(!victim) return NULL;
- nb = chunksize(victim);
- if(chunk_is_mmapped(victim))
- --nb;
- else
- nb += SIZE_SZ - 1;
- *((unsigned char*)victim + nb) = MAGICBYTE(victim);
- return chunk2mem(victim);
+ return chunk2mem_check(victim, sz);
}
static void
(void)mutex_unlock(&main_arena.mutex);
switch(check_action) {
case 1:
- fprintf(stderr, "free(): invalid pointer %lx!\n", (long)(mem));
+ fprintf(stderr, "free(): invalid pointer %p!\n", mem);
break;
case 2:
abort();
(void)mutex_unlock(&main_arena.mutex);
switch(check_action) {
case 1:
- fprintf(stderr, "realloc(): invalid pointer %lx!\n", (long)(oldmem));
+ fprintf(stderr, "realloc(): invalid pointer %p!\n", oldmem);
break;
case 2:
abort();
if(oldsize - SIZE_SZ >= nb) newp = oldp; /* do nothing */
else {
/* Must alloc, copy, free. */
- newp = chunk_alloc(&main_arena, nb);
+ newp = (top_check() >= 0) ? chunk_alloc(&main_arena, nb) : NULL;
if (newp) {
MALLOC_COPY(chunk2mem(newp), oldmem, oldsize - 2*SIZE_SZ);
munmap_chunk(oldp);
#endif
} else {
#endif /* HAVE_MMAP */
- newp = chunk_realloc(&main_arena, oldp, oldsize, nb);
+ newp = (top_check() >= 0) ?
+ chunk_realloc(&main_arena, oldp, oldsize, nb) : NULL;
#if 0 /* Erase freed memory. */
nb = chunksize(newp);
if(oldp<newp || oldp>=chunk_at_offset(newp, nb)) {
(void)mutex_unlock(&main_arena.mutex);
if(!newp) return NULL;
- nb = chunksize(newp);
- if(chunk_is_mmapped(newp))
- --nb;
- else
- nb += SIZE_SZ - 1;
- *((unsigned char*)newp + nb) = MAGICBYTE(newp);
- return chunk2mem(newp);
+ return chunk2mem_check(newp, bytes);
}
static Void_t*
nb = request2size(bytes+1);
(void)mutex_lock(&main_arena.mutex);
- p = chunk_align(&main_arena, nb, alignment);
+ p = (top_check() >= 0) ? chunk_align(&main_arena, nb, alignment) : NULL;
(void)mutex_unlock(&main_arena.mutex);
if(!p) return NULL;
- nb = chunksize(p);
- if(chunk_is_mmapped(p))
- --nb;
- else
- nb += SIZE_SZ - 1;
- *((unsigned char*)p + nb) = MAGICBYTE(p);
- return chunk2mem(p);
+ return chunk2mem_check(p, bytes);
}
+#ifndef NO_THREADS
+
/* The following hooks are used when the global initialization in
ptmalloc_init() hasn't completed yet. */
(void)mutex_unlock(&ar_ptr->mutex);
}
+#endif
+
#endif /* defined _LIBC || defined MALLOC_HOOKS */
\f