diff --git a/malloc/malloc.c b/malloc/malloc.c
index 50bba6b446..03d68b75e7 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1,5 +1,5 @@
 /* Malloc implementation for multiple threads without lock contention.
-   Copyright (C) 1996, 1997 Free Software Foundation, Inc.
+   Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Wolfram Gloger <wmglo@dent.med.uni-muenchen.de>
    and Doug Lea <dl@cs.oswego.edu>, 1996.
@@ -19,7 +19,7 @@
    write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
    Boston, MA 02111-1307, USA.  */
 
-/* V2.6.4-pt2 Sat Dec 14 1996
+/* V2.6.4-pt3 Thu Feb 20 1997
 
   This work is mainly derived from malloc-2.6.4 by Doug Lea
   <dl@cs.oswego.edu>, which is available from:
@@ -297,7 +297,7 @@
 
 #if __STD_C
 # include <stddef.h>   /* for size_t */
-# if defined(_LIBC) || defined(MALLOC_HOOKS)
+# if defined _LIBC || defined MALLOC_HOOKS
 #  include <stdlib.h>  /* for getenv(), abort() */
 # endif
 #else
@@ -493,6 +493,10 @@ do { \
 
 #endif
 
+#ifndef LACKS_UNISTD_H
+# include <unistd.h>
+#endif
+
 /*
   Define HAVE_MMAP to optionally make malloc() use mmap() to
   allocate very large blocks.  These will be returned to the
@@ -500,7 +504,9 @@
 */
 
 #ifndef HAVE_MMAP
-#define HAVE_MMAP 1
+# ifdef _POSIX_MAPPED_FILES
+#  define HAVE_MMAP 1
+# endif
 #endif
 
 /*
@@ -510,7 +516,7 @@
 */
 
 #ifndef HAVE_MREMAP
-#define HAVE_MREMAP defined(__linux__)
+#define HAVE_MREMAP defined(__linux__) && !defined(__arm__)
 #endif
 
 #if HAVE_MMAP
@@ -523,6 +529,14 @@
 #define MAP_ANONYMOUS MAP_ANON
 #endif
 
+#ifndef MAP_NORESERVE
+# ifdef MAP_AUTORESRV
+#  define MAP_NORESERVE MAP_AUTORESRV
+# else
+#  define MAP_NORESERVE 0
+# endif
+#endif
+
 #endif /* HAVE_MMAP */
 
 /*
@@ -533,10 +547,6 @@
   bsd/gnu getpagesize.h
 */
 
-#ifndef LACKS_UNISTD_H
-# include <unistd.h>
-#endif
-
 #ifndef malloc_getpagesize
 # ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
 #  ifndef _SC_PAGE_SIZE
@@ -647,7 +657,7 @@ might set to a value close to the average size of a process
       (program) running on your system.  Releasing this much memory
       would allow such a process to run in memory.
Generally, it's - worth it to tune for trimming rather tham memory mapping when a + worth it to tune for trimming rather than memory mapping when a program undergoes phases where several large chunks are allocated and released in ways that can reuse each other's storage, perhaps mixed with phases where there are no such @@ -820,12 +830,12 @@ do { \ #if __STD_C Void_t * __default_morecore (ptrdiff_t); -static Void_t *(*__morecore)(ptrdiff_t) = __default_morecore; +Void_t *(*__morecore)(ptrdiff_t) = __default_morecore; #else Void_t * __default_morecore (); -static Void_t *(*__morecore)() = __default_morecore; +Void_t *(*__morecore)() = __default_morecore; #endif @@ -835,6 +845,7 @@ static Void_t *(*__morecore)() = __default_morecore; #define mmap __mmap #define munmap __munmap #define mremap __mremap +#define mprotect __mprotect #undef malloc_getpagesize #define malloc_getpagesize __getpagesize() @@ -874,6 +885,8 @@ extern Void_t* sbrk(); #define mALLOC_STATs __malloc_stats #define mALLOC_USABLE_SIZe __malloc_usable_size #define mALLOC_TRIm __malloc_trim +#define mALLOC_GET_STATe __malloc_get_state +#define mALLOC_SET_STATe __malloc_set_state #else @@ -889,6 +902,8 @@ extern Void_t* sbrk(); #define mALLOC_STATs malloc_stats #define mALLOC_USABLE_SIZe malloc_usable_size #define mALLOC_TRIm malloc_trim +#define mALLOC_GET_STATe malloc_get_state +#define mALLOC_SET_STATe malloc_set_state #endif @@ -912,7 +927,11 @@ size_t mALLOC_USABLE_SIZe(Void_t*); void mALLOC_STATs(void); int mALLOPt(int, int); struct mallinfo mALLINFo(void); -#else +Void_t* mALLOC_GET_STATe(void); +int mALLOC_SET_STATe(Void_t*); + +#else /* !__STD_C */ + #ifndef _LIBC void ptmalloc_init(); #endif @@ -929,7 +948,10 @@ size_t mALLOC_USABLE_SIZe(); void mALLOC_STATs(); int mALLOPt(); struct mallinfo mALLINFo(); -#endif +Void_t* mALLOC_GET_STATe(); +int mALLOC_SET_STATe(); + +#endif /* __STD_C */ #ifdef __cplusplus @@ -1156,23 +1178,31 @@ typedef struct _heap_info { #if __STD_C -static void chunk_free(arena *ar_ptr, mchunkptr p); -static mchunkptr chunk_alloc(arena *ar_ptr, INTERNAL_SIZE_T size); +static void chunk_free(arena *ar_ptr, mchunkptr p) internal_function; +static mchunkptr chunk_alloc(arena *ar_ptr, INTERNAL_SIZE_T size) + internal_function; static mchunkptr chunk_realloc(arena *ar_ptr, mchunkptr oldp, - INTERNAL_SIZE_T oldsize, INTERNAL_SIZE_T nb); + INTERNAL_SIZE_T oldsize, INTERNAL_SIZE_T nb) + internal_function; static mchunkptr chunk_align(arena *ar_ptr, INTERNAL_SIZE_T nb, - size_t alignment); -static int main_trim(size_t pad); + size_t alignment) internal_function; +static int main_trim(size_t pad) internal_function; #ifndef NO_THREADS -static int heap_trim(heap_info *heap, size_t pad); +static int heap_trim(heap_info *heap, size_t pad) internal_function; +#endif +#if defined _LIBC || defined MALLOC_HOOKS +static Void_t* malloc_check(size_t sz, const Void_t *caller); +static void free_check(Void_t* mem, const Void_t *caller); +static Void_t* realloc_check(Void_t* oldmem, size_t bytes, + const Void_t *caller); +static Void_t* memalign_check(size_t alignment, size_t bytes, + const Void_t *caller); +#ifndef NO_THREADS +static Void_t* malloc_starter(size_t sz, const Void_t *caller); +static void free_starter(Void_t* mem, const Void_t *caller); +static Void_t* malloc_atfork(size_t sz, const Void_t *caller); +static void free_atfork(Void_t* mem, const Void_t *caller); #endif -#if defined(_LIBC) || defined(MALLOC_HOOKS) -static Void_t* malloc_check(size_t sz); -static void free_check(Void_t* mem); -static Void_t* 
realloc_check(Void_t* oldmem, size_t bytes); -static Void_t* memalign_check(size_t alignment, size_t bytes); -static Void_t* malloc_starter(size_t sz); -static void free_starter(Void_t* mem); #endif #else @@ -1185,15 +1215,26 @@ static int main_trim(); #ifndef NO_THREADS static int heap_trim(); #endif -#if defined(_LIBC) || defined(MALLOC_HOOKS) +#if defined _LIBC || defined MALLOC_HOOKS static Void_t* malloc_check(); static void free_check(); static Void_t* realloc_check(); static Void_t* memalign_check(); +#ifndef NO_THREADS static Void_t* malloc_starter(); static void free_starter(); +static Void_t* malloc_atfork(); +static void free_atfork(); +#endif +#endif + #endif +/* On some platforms we can compile internal, not exported functions better. + Let the environment provide a macro and define it to be empty if it + is not available. */ +#ifndef internal_function +# define internal_function #endif @@ -1358,13 +1399,13 @@ static void free_starter(); Indexing into bins */ -#define bin_index(sz) \ -(((((unsigned long)(sz)) >> 9) == 0) ? (((unsigned long)(sz)) >> 3): \ - ((((unsigned long)(sz)) >> 9) <= 4) ? 56 + (((unsigned long)(sz)) >> 6): \ - ((((unsigned long)(sz)) >> 9) <= 20) ? 91 + (((unsigned long)(sz)) >> 9): \ - ((((unsigned long)(sz)) >> 9) <= 84) ? 110 + (((unsigned long)(sz)) >> 12): \ - ((((unsigned long)(sz)) >> 9) <= 340) ? 119 + (((unsigned long)(sz)) >> 15): \ - ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \ +#define bin_index(sz) \ +(((((unsigned long)(sz)) >> 9) == 0) ? (((unsigned long)(sz)) >> 3):\ + ((((unsigned long)(sz)) >> 9) <= 4) ? 56 + (((unsigned long)(sz)) >> 6):\ + ((((unsigned long)(sz)) >> 9) <= 20) ? 91 + (((unsigned long)(sz)) >> 9):\ + ((((unsigned long)(sz)) >> 9) <= 84) ? 110 + (((unsigned long)(sz)) >> 12):\ + ((((unsigned long)(sz)) >> 9) <= 340) ? 119 + (((unsigned long)(sz)) >> 15):\ + ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18):\ 126) /* bins for chunks < 512 are all spaced 8 bytes apart, and hold @@ -1486,10 +1527,93 @@ static unsigned long max_mmapped_mem = 0; +#ifndef _LIBC +#define weak_variable +#else +/* In GNU libc we want the hook variables to be weak definitions to + avoid a problem with Emacs. */ +#define weak_variable weak_function +#endif /* Already initialized? */ -int __malloc_initialized = 0; +int __malloc_initialized = -1; + +#ifndef NO_THREADS + +/* The following two functions are registered via thread_atfork() to + make sure that the mutexes remain in a consistent state in the + fork()ed version of a thread. Also adapt the malloc and free hooks + temporarily, because the `atfork' handler mechanism may use + malloc/free internally (e.g. in LinuxThreads). */ + +#if defined _LIBC || defined MALLOC_HOOKS +static __malloc_ptr_t (*save_malloc_hook) __MALLOC_P ((size_t __size, + const __malloc_ptr_t)); +static void (*save_free_hook) __MALLOC_P ((__malloc_ptr_t __ptr, + const __malloc_ptr_t)); +static Void_t* save_arena; +#endif + +static void +ptmalloc_lock_all __MALLOC_P((void)) +{ + arena *ar_ptr; + + (void)mutex_lock(&list_lock); + for(ar_ptr = &main_arena;;) { + (void)mutex_lock(&ar_ptr->mutex); + ar_ptr = ar_ptr->next; + if(ar_ptr == &main_arena) break; + } +#if defined _LIBC || defined MALLOC_HOOKS + save_malloc_hook = __malloc_hook; + save_free_hook = __free_hook; + __malloc_hook = malloc_atfork; + __free_hook = free_atfork; + /* Only the current thread may perform malloc/free calls now. 
*/ + tsd_getspecific(arena_key, save_arena); + tsd_setspecific(arena_key, (Void_t*)0); +#endif +} + +static void +ptmalloc_unlock_all __MALLOC_P((void)) +{ + arena *ar_ptr; + +#if defined _LIBC || defined MALLOC_HOOKS + tsd_setspecific(arena_key, save_arena); + __malloc_hook = save_malloc_hook; + __free_hook = save_free_hook; +#endif + for(ar_ptr = &main_arena;;) { + (void)mutex_unlock(&ar_ptr->mutex); + ar_ptr = ar_ptr->next; + if(ar_ptr == &main_arena) break; + } + (void)mutex_unlock(&list_lock); +} + +static void +ptmalloc_init_all __MALLOC_P((void)) +{ + arena *ar_ptr; + +#if defined _LIBC || defined MALLOC_HOOKS + tsd_setspecific(arena_key, save_arena); + __malloc_hook = save_malloc_hook; + __free_hook = save_free_hook; +#endif + for(ar_ptr = &main_arena;;) { + (void)mutex_init(&ar_ptr->mutex); + ar_ptr = ar_ptr->next; + if(ar_ptr == &main_arena) break; + } + (void)mutex_init(&list_lock); +} + +#endif /* Initialization routine. */ #if defined(_LIBC) @@ -1504,15 +1628,14 @@ void ptmalloc_init __MALLOC_P((void)) #endif { -#if defined(_LIBC) || defined(MALLOC_HOOKS) - __malloc_ptr_t (*save_malloc_hook) __MALLOC_P ((size_t __size)); - void (*save_free_hook) __MALLOC_P ((__malloc_ptr_t __ptr)); +#if defined _LIBC || defined MALLOC_HOOKS const char* s; #endif - if(__malloc_initialized) return; - __malloc_initialized = 1; -#if defined(_LIBC) || defined(MALLOC_HOOKS) + if(__malloc_initialized >= 0) return; + __malloc_initialized = 0; +#ifndef NO_THREADS +#if defined _LIBC || defined MALLOC_HOOKS /* With some threads implementations, creating thread-specific data or initializing a mutex may call malloc() itself. Provide a simple starter version (realloc() won't work). */ @@ -1521,90 +1644,132 @@ ptmalloc_init __MALLOC_P((void)) __malloc_hook = malloc_starter; __free_hook = free_starter; #endif -#if defined(_LIBC) && !defined (NO_THREADS) +#ifdef _LIBC /* Initialize the pthreads interface. */ if (__pthread_initialize != NULL) __pthread_initialize(); #endif -#ifndef NO_THREADS mutex_init(&main_arena.mutex); mutex_init(&list_lock); tsd_key_create(&arena_key, NULL); tsd_setspecific(arena_key, (Void_t *)&main_arena); -#endif -#if defined(_LIBC) || defined(MALLOC_HOOKS) + thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_init_all); +#endif /* !defined NO_THREADS */ +#if defined _LIBC || defined MALLOC_HOOKS + if((s = getenv("MALLOC_TRIM_THRESHOLD_"))) + mALLOPt(M_TRIM_THRESHOLD, atoi(s)); + if((s = getenv("MALLOC_TOP_PAD_"))) + mALLOPt(M_TOP_PAD, atoi(s)); + if((s = getenv("MALLOC_MMAP_THRESHOLD_"))) + mALLOPt(M_MMAP_THRESHOLD, atoi(s)); + if((s = getenv("MALLOC_MMAP_MAX_"))) + mALLOPt(M_MMAP_MAX, atoi(s)); s = getenv("MALLOC_CHECK_"); +#ifndef NO_THREADS __malloc_hook = save_malloc_hook; __free_hook = save_free_hook; +#endif if(s) { - if(s[0]) mallopt(M_CHECK_ACTION, (int)(s[0] - '0')); - malloc_check_init(); + if(s[0]) mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0')); + __malloc_check_init(); } if(__malloc_initialize_hook != NULL) (*__malloc_initialize_hook)(); #endif + __malloc_initialized = 1; } -#if defined(_LIBC) || defined(MALLOC_HOOKS) +/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */ +#ifdef thread_atfork_static +thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \ + ptmalloc_init_all) +#endif + +#if defined _LIBC || defined MALLOC_HOOKS /* Hooks for debugging versions. The initial hooks just call the initialization routine, then do the normal work. 
*/ static Void_t* #if __STD_C -malloc_hook_ini(size_t sz) +malloc_hook_ini(size_t sz, const __malloc_ptr_t caller) #else -malloc_hook_ini(sz) size_t sz; +malloc_hook_ini(sz, caller) + size_t sz; const __malloc_ptr_t caller; #endif { __malloc_hook = NULL; - __realloc_hook = NULL; - __memalign_hook = NULL; ptmalloc_init(); return mALLOc(sz); } static Void_t* #if __STD_C -realloc_hook_ini(Void_t* ptr, size_t sz) +realloc_hook_ini(Void_t* ptr, size_t sz, const __malloc_ptr_t caller) #else -realloc_hook_ini(ptr, sz) Void_t* ptr; size_t sz; +realloc_hook_ini(ptr, sz, caller) + Void_t* ptr; size_t sz; const __malloc_ptr_t caller; #endif { __malloc_hook = NULL; __realloc_hook = NULL; - __memalign_hook = NULL; ptmalloc_init(); return rEALLOc(ptr, sz); } static Void_t* #if __STD_C -memalign_hook_ini(size_t sz, size_t alignment) +memalign_hook_ini(size_t sz, size_t alignment, const __malloc_ptr_t caller) #else -memalign_hook_ini(sz, alignment) size_t sz; size_t alignment; +memalign_hook_ini(sz, alignment, caller) + size_t sz; size_t alignment; const __malloc_ptr_t caller; #endif { - __malloc_hook = NULL; - __realloc_hook = NULL; __memalign_hook = NULL; ptmalloc_init(); return mEMALIGn(sz, alignment); } -void (*__malloc_initialize_hook) __MALLOC_P ((void)) = NULL; -void (*__free_hook) __MALLOC_P ((__malloc_ptr_t __ptr)) = NULL; -__malloc_ptr_t (*__malloc_hook) - __MALLOC_P ((size_t __size)) = malloc_hook_ini; -__malloc_ptr_t (*__realloc_hook) - __MALLOC_P ((__malloc_ptr_t __ptr, size_t __size)) = realloc_hook_ini; -__malloc_ptr_t (*__memalign_hook) - __MALLOC_P ((size_t __size, size_t __alignment)) = memalign_hook_ini; +void weak_variable (*__malloc_initialize_hook) __MALLOC_P ((void)) = NULL; +void weak_variable (*__free_hook) __MALLOC_P ((__malloc_ptr_t __ptr, + const __malloc_ptr_t)) = NULL; +__malloc_ptr_t weak_variable (*__malloc_hook) + __MALLOC_P ((size_t __size, const __malloc_ptr_t)) = malloc_hook_ini; +__malloc_ptr_t weak_variable (*__realloc_hook) + __MALLOC_P ((__malloc_ptr_t __ptr, size_t __size, const __malloc_ptr_t)) + = realloc_hook_ini; +__malloc_ptr_t weak_variable (*__memalign_hook) + __MALLOC_P ((size_t __size, size_t __alignment, const __malloc_ptr_t)) + = memalign_hook_ini; +void weak_variable (*__after_morecore_hook) __MALLOC_P ((void)) = NULL; + +/* Whether we are using malloc checking. */ +static int using_malloc_checking; + +/* A flag that is set by malloc_set_state, to signal that malloc checking + must not be enabled on the request from the user (via the MALLOC_CHECK_ + environment variable). It is reset by __malloc_check_init to tell + malloc_set_state that the user has requested malloc checking. + + The purpose of this flag is to make sure that malloc checking is not + enabled when the heap to be restored was constructed without malloc + checking, and thus does not contain the required magic bytes. + Otherwise the heap would be corrupted by calls to free and realloc. If + it turns out that the heap was created with malloc checking and the + user has requested it malloc_set_state just calls __malloc_check_init + again to enable it. On the other hand, reusing such a heap without + further malloc checking is safe. */ +static int disallow_malloc_check; /* Activate a standard set of debugging hooks. 
*/ void -malloc_check_init() +__malloc_check_init() { + if (disallow_malloc_check) { + disallow_malloc_check = 0; + return; + } + using_malloc_checking = 1; __malloc_hook = malloc_check; __free_hook = free_check; __realloc_hook = realloc_check; @@ -1627,22 +1792,28 @@ malloc_check_init() static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ -#define MMAP(size, prot) ((dev_zero_fd < 0) ? \ +#define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \ (dev_zero_fd = open("/dev/zero", O_RDWR), \ - mmap(0, (size), (prot), MAP_PRIVATE, dev_zero_fd, 0)) : \ - mmap(0, (size), (prot), MAP_PRIVATE, dev_zero_fd, 0)) + mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \ + mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) #else -#define MMAP(size, prot) \ - (mmap(0, (size), (prot), MAP_PRIVATE|MAP_ANONYMOUS, -1, 0)) +#define MMAP(addr, size, prot, flags) \ + (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0)) #endif +#if defined __GNUC__ && __GNUC__ >= 2 +/* This function is only called from one place, inline it. */ +inline +#endif +static mchunkptr +internal_function #if __STD_C -static mchunkptr mmap_chunk(size_t size) +mmap_chunk(size_t size) #else -static mchunkptr mmap_chunk(size) size_t size; +mmap_chunk(size) size_t size; #endif { size_t page_mask = malloc_getpagesize - 1; @@ -1655,8 +1826,8 @@ static mchunkptr mmap_chunk(size) size_t size; */ size = (size + SIZE_SZ + page_mask) & ~page_mask; - p = (mchunkptr)MMAP(size, PROT_READ|PROT_WRITE); - if(p == (mchunkptr)-1) return 0; + p = (mchunkptr)MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE); + if(p == (mchunkptr) MAP_FAILED) return 0; n_mmaps++; if (n_mmaps > max_n_mmaps) max_n_mmaps = n_mmaps; @@ -1762,6 +1933,7 @@ static mchunkptr mremap_chunk(p, new_size) mchunkptr p; size_t new_size; of the page size. */ static heap_info * +internal_function #if __STD_C new_heap(size_t size) #else @@ -1773,13 +1945,22 @@ new_heap(size) size_t size; unsigned long ul; heap_info *h; - if(size < HEAP_MIN_SIZE) + if(size+top_pad < HEAP_MIN_SIZE) size = HEAP_MIN_SIZE; - size = (size + page_mask) & ~page_mask; - if(size > HEAP_MAX_SIZE) + else if(size+top_pad <= HEAP_MAX_SIZE) + size += top_pad; + else if(size > HEAP_MAX_SIZE) return 0; - p1 = (char *)MMAP(HEAP_MAX_SIZE<<1, PROT_NONE); - if(p1 == (char *)-1) + else + size = HEAP_MAX_SIZE; + size = (size + page_mask) & ~page_mask; + + /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed. + No swap space needs to be reserved for the following large + mapping (on Linux, this is the case for all non-writable mappings + anyway). */ + p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE); + if(p1 == MAP_FAILED) return 0; p2 = (char *)(((unsigned long)p1 + HEAP_MAX_SIZE) & ~(HEAP_MAX_SIZE-1)); ul = p2 - p1; @@ -1819,7 +2000,10 @@ grow_heap(h, diff) heap_info *h; long diff; new_size = (long)h->size + diff; if(new_size < (long)sizeof(*h)) return -1; - if(mprotect((char *)h + new_size, -diff, PROT_NONE) != 0) + /* Try to re-map the extra heap space freshly to save memory, and + make it inaccessible. */ + if((char *)MMAP((char *)h + new_size, -diff, PROT_NONE, + MAP_PRIVATE|MAP_FIXED) == (char *) MAP_FAILED) return -2; } h->size = new_size; @@ -1846,6 +2030,7 @@ grow_heap(h, diff) heap_info *h; long diff; } while(0) static arena * +internal_function #if __STD_C arena_get2(arena *a_tsd, size_t size) #else @@ -1871,6 +2056,7 @@ arena_get2(a_tsd, size) arena *a_tsd; size_t size; } /* Check the global, circularly linked list for available arenas. 
*/ + repeat: do { if(!mutex_trylock(&a->mutex)) { THREAD_STAT(++(a->stat_lock_loop)); @@ -1880,6 +2066,16 @@ arena_get2(a_tsd, size) arena *a_tsd; size_t size; a = a->next; } while(a != a_tsd); + /* If not even the list_lock can be obtained, try again. This can + happen during `atfork', or for example on systems where thread + creation makes it temporarily impossible to obtain _any_ + locks. */ + if(mutex_trylock(&list_lock)) { + a = a_tsd; + goto repeat; + } + (void)mutex_unlock(&list_lock); + /* Nothing immediately available, so generate a new arena. */ h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT)); if(!h) @@ -1963,7 +2159,10 @@ static void do_check_chunk(ar_ptr, p) arena *ar_ptr; mchunkptr p; if(ar_ptr != &main_arena) { heap_info *heap = heap_for_ptr(p); assert(heap->ar_ptr == ar_ptr); - assert((char *)p + sz <= (char *)heap + heap->size); + if(p != top(ar_ptr)) + assert((char *)p + sz <= (char *)heap + heap->size); + else + assert((char *)p + sz == (char *)heap + heap->size); return; } #endif @@ -2162,10 +2361,16 @@ arena *ar_ptr; mchunkptr p; INTERNAL_SIZE_T s; Main interface to sbrk (but see also malloc_trim). */ +#if defined __GNUC__ && __GNUC__ >= 2 +/* This function is called only from one place, inline it. */ +inline +#endif +static void +internal_function #if __STD_C -static void malloc_extend_top(arena *ar_ptr, INTERNAL_SIZE_T nb) +malloc_extend_top(arena *ar_ptr, INTERNAL_SIZE_T nb) #else -static void malloc_extend_top(ar_ptr, nb) arena *ar_ptr; INTERNAL_SIZE_T nb; +malloc_extend_top(ar_ptr, nb) arena *ar_ptr; INTERNAL_SIZE_T nb; #endif { unsigned long pagesz = malloc_getpagesize; @@ -2200,6 +2405,12 @@ static void malloc_extend_top(ar_ptr, nb) arena *ar_ptr; INTERNAL_SIZE_T nb; (brk < old_end && old_top != initial_top(&main_arena))) return; +#if defined _LIBC || defined MALLOC_HOOKS + /* Call the `morecore' hook if necessary. */ + if (__after_morecore_hook) + (*__after_morecore_hook) (); +#endif + sbrked_mem += sbrk_size; if (brk == old_end) { /* can just add bytes to current top */ @@ -2228,6 +2439,12 @@ static void malloc_extend_top(ar_ptr, nb) arena *ar_ptr; INTERNAL_SIZE_T nb; new_brk = (char*)(MORECORE (correction)); if (new_brk == (char*)(MORECORE_FAILURE)) return; +#if defined _LIBC || defined MALLOC_HOOKS + /* Call the `morecore' hook if necessary. */ + if (__after_morecore_hook) + (*__after_morecore_hook) (); +#endif + sbrked_mem += correction; top(&main_arena) = (mchunkptr)brk; @@ -2267,7 +2484,7 @@ static void malloc_extend_top(ar_ptr, nb) arena *ar_ptr; INTERNAL_SIZE_T nb; } /* A new heap must be created. */ - heap = new_heap(nb + top_pad + (MINSIZE + sizeof(*heap))); + heap = new_heap(nb + (MINSIZE + sizeof(*heap))); if(!heap) return; heap->ar_ptr = ar_ptr; @@ -2361,7 +2578,7 @@ static void malloc_extend_top(ar_ptr, nb) arena *ar_ptr; INTERNAL_SIZE_T nb; mallocs with other sbrk calls. - All allocations are made from the the `lowest' part of any found + All allocations are made from the `lowest' part of any found chunk. 
(The implementation invariant is that prev_inuse is always true of any allocated chunk; i.e., that each allocated chunk borders either a previously allocated and still in-use chunk, @@ -2379,25 +2596,39 @@ Void_t* mALLOc(bytes) size_t bytes; INTERNAL_SIZE_T nb; /* padded request size */ mchunkptr victim; -#if defined(_LIBC) || defined(MALLOC_HOOKS) +#if defined _LIBC || defined MALLOC_HOOKS if (__malloc_hook != NULL) { Void_t* result; - result = (*__malloc_hook)(bytes); +#if defined __GNUC__ && __GNUC__ >= 2 + result = (*__malloc_hook)(bytes, __builtin_return_address (0)); +#else + result = (*__malloc_hook)(bytes, NULL); +#endif return result; } #endif nb = request2size(bytes); - arena_get(ar_ptr, nb + top_pad); + arena_get(ar_ptr, nb); if(!ar_ptr) return 0; victim = chunk_alloc(ar_ptr, nb); (void)mutex_unlock(&ar_ptr->mutex); - return victim ? chunk2mem(victim) : 0; + if(!victim) { + /* Maybe the failure is due to running out of mmapped areas. */ + if(ar_ptr != &main_arena) { + (void)mutex_lock(&main_arena.mutex); + victim = chunk_alloc(&main_arena, nb); + (void)mutex_unlock(&main_arena.mutex); + } + if(!victim) return 0; + } + return chunk2mem(victim); } static mchunkptr +internal_function #if __STD_C chunk_alloc(arena *ar_ptr, INTERNAL_SIZE_T nb) #else @@ -2663,9 +2894,13 @@ void fREe(mem) Void_t* mem; arena *ar_ptr; mchunkptr p; /* chunk corresponding to mem */ -#if defined(_LIBC) || defined(MALLOC_HOOKS) +#if defined _LIBC || defined MALLOC_HOOKS if (__free_hook != NULL) { - (*__free_hook)(mem); +#if defined __GNUC__ && __GNUC__ >= 2 + (*__free_hook)(mem, __builtin_return_address (0)); +#else + (*__free_hook)(mem, NULL); +#endif return; } #endif @@ -2699,6 +2934,7 @@ void fREe(mem) Void_t* mem; } static void +internal_function #if __STD_C chunk_free(arena *ar_ptr, mchunkptr p) #else @@ -2756,8 +2992,6 @@ chunk_free(ar_ptr, p) arena *ar_ptr; mchunkptr p; return; } - set_head(next, nextsz); /* clear inuse bit */ - islr = 0; if (!(hd & PREV_INUSE)) /* consolidate backward */ @@ -2784,12 +3018,29 @@ chunk_free(ar_ptr, p) arena *ar_ptr; mchunkptr p; } else unlink(next, bck, fwd); + + next = chunk_at_offset(p, sz); } + else + set_head(next, nextsz); /* clear inuse bit */ set_head(p, sz | PREV_INUSE); - set_foot(p, sz); + next->prev_size = sz; if (!islr) frontlink(ar_ptr, p, sz, idx, bck, fwd); + +#ifndef NO_THREADS + /* Check whether the heap containing top can go away now. */ + if(next->size < MINSIZE && + (unsigned long)sz > trim_threshold && + ar_ptr != &main_arena) { /* fencepost */ + heap_info* heap = heap_for_ptr(top(ar_ptr)); + + if(top(ar_ptr) == chunk_at_offset(heap, sizeof(*heap)) && + heap->prev == heap_for_ptr(p)) + heap_trim(heap, top_pad); + } +#endif } @@ -2846,11 +3097,15 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes; mchunkptr newp; /* chunk to return */ -#if defined(_LIBC) || defined(MALLOC_HOOKS) +#if defined _LIBC || defined MALLOC_HOOKS if (__realloc_hook != NULL) { Void_t* result; - result = (*__realloc_hook)(oldmem, bytes); +#if defined __GNUC__ && __GNUC__ >= 2 + result = (*__realloc_hook)(oldmem, bytes, __builtin_return_address (0)); +#else + result = (*__realloc_hook)(oldmem, bytes, NULL); +#endif return result; } #endif @@ -2899,8 +3154,10 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes; (void)mutex_lock(&ar_ptr->mutex); #endif +#ifndef NO_THREADS /* As in malloc(), remember this arena for the next allocation. 
*/ tsd_setspecific(arena_key, (Void_t *)ar_ptr); +#endif newp = chunk_realloc(ar_ptr, oldp, oldsize, nb); @@ -2909,6 +3166,7 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes; } static mchunkptr +internal_function #if __STD_C chunk_realloc(arena* ar_ptr, mchunkptr oldp, INTERNAL_SIZE_T oldsize, INTERNAL_SIZE_T nb) @@ -3025,8 +3283,16 @@ arena* ar_ptr; mchunkptr oldp; INTERNAL_SIZE_T oldsize, nb; newp = chunk_alloc (ar_ptr, nb); - if (newp == 0) /* propagate failure */ - return 0; + if (newp == 0) { + /* Maybe the failure is due to running out of mmapped areas. */ + if (ar_ptr != &main_arena) { + (void)mutex_lock(&main_arena.mutex); + newp = chunk_alloc(&main_arena, nb); + (void)mutex_unlock(&main_arena.mutex); + } + if (newp == 0) /* propagate failure */ + return 0; + } /* Avoid copy if newp is next chunk after oldp. */ /* (This can only happen when new chunk is sbrk'ed.) */ @@ -3098,11 +3364,16 @@ Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes; INTERNAL_SIZE_T nb; /* padded request size */ mchunkptr p; -#if defined(_LIBC) || defined(MALLOC_HOOKS) +#if defined _LIBC || defined MALLOC_HOOKS if (__memalign_hook != NULL) { Void_t* result; - result = (*__memalign_hook)(alignment, bytes); +#if defined __GNUC__ && __GNUC__ >= 2 + result = (*__memalign_hook)(alignment, bytes, + __builtin_return_address (0)); +#else + result = (*__memalign_hook)(alignment, bytes, NULL); +#endif return result; } #endif @@ -3121,10 +3392,20 @@ Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes; return 0; p = chunk_align(ar_ptr, nb, alignment); (void)mutex_unlock(&ar_ptr->mutex); - return p ? chunk2mem(p) : NULL; + if(!p) { + /* Maybe the failure is due to running out of mmapped areas. */ + if(ar_ptr != &main_arena) { + (void)mutex_lock(&main_arena.mutex); + p = chunk_align(&main_arena, nb, alignment); + (void)mutex_unlock(&main_arena.mutex); + } + if(!p) return 0; + } + return chunk2mem(p); } static mchunkptr +internal_function #if __STD_C chunk_align(arena* ar_ptr, INTERNAL_SIZE_T nb, size_t alignment) #else @@ -3261,16 +3542,22 @@ Void_t* cALLOc(n, elem_size) size_t n; size_t elem_size; INTERNAL_SIZE_T sz, csz, oldtopsize; Void_t* mem; -#if defined(_LIBC) || defined(MALLOC_HOOKS) +#if defined _LIBC || defined MALLOC_HOOKS if (__malloc_hook != NULL) { sz = n * elem_size; - mem = (*__malloc_hook)(sz); -#ifdef HAVE_MEMCPY - memset(mem, 0, sz); +#if defined __GNUC__ && __GNUC__ >= 2 + mem = (*__malloc_hook)(sz, __builtin_return_address (0)); #else - while(sz > 0) mem[--sz] = 0; /* rather inefficient */ + mem = (*__malloc_hook)(sz, NULL); #endif + if(mem == 0) + return 0; +#ifdef HAVE_MEMSET + return memset(mem, 0, sz); +#else + while(sz > 0) ((char*)mem)[--sz] = 0; /* rather inefficient */ return mem; +#endif } #endif @@ -3289,31 +3576,34 @@ Void_t* cALLOc(n, elem_size) size_t n; size_t elem_size; /* Only clearing follows, so we can unlock early. */ (void)mutex_unlock(&ar_ptr->mutex); - if (p == 0) - return 0; - else - { - mem = chunk2mem(p); + if (p == 0) { + /* Maybe the failure is due to running out of mmapped areas. 
*/ + if(ar_ptr != &main_arena) { + (void)mutex_lock(&main_arena.mutex); + p = chunk_alloc(&main_arena, sz); + (void)mutex_unlock(&main_arena.mutex); + } + if (p == 0) return 0; + } + mem = chunk2mem(p); - /* Two optional cases in which clearing not necessary */ + /* Two optional cases in which clearing not necessary */ #if HAVE_MMAP - if (chunk_is_mmapped(p)) return mem; + if (chunk_is_mmapped(p)) return mem; #endif - csz = chunksize(p); + csz = chunksize(p); #if MORECORE_CLEARS - if (p == oldtop && csz > oldtopsize) - { - /* clear only the bytes from non-freshly-sbrked memory */ - csz = oldtopsize; - } + if (p == oldtop && csz > oldtopsize) { + /* clear only the bytes from non-freshly-sbrked memory */ + csz = oldtopsize; + } #endif - MALLOC_ZERO(mem, csz - SIZE_SZ); - return mem; - } + MALLOC_ZERO(mem, csz - SIZE_SZ); + return mem; } /* @@ -3376,6 +3666,7 @@ int mALLOC_TRIm(pad) size_t pad; /* Trim the main arena. */ static int +internal_function #if __STD_C main_trim(size_t pad) #else @@ -3404,6 +3695,12 @@ main_trim(pad) size_t pad; new_brk = (char*)(MORECORE (-extra)); +#if defined _LIBC || defined MALLOC_HOOKS + /* Call the `morecore' hook if necessary. */ + if (__after_morecore_hook) + (*__after_morecore_hook) (); +#endif + if (new_brk == (char*)(MORECORE_FAILURE)) { /* sbrk failed? */ /* Try to figure out what we have */ current_brk = (char*)(MORECORE (0)); @@ -3427,6 +3724,7 @@ main_trim(pad) size_t pad; #ifndef NO_THREADS static int +internal_function #if __STD_C heap_trim(heap_info *heap, size_t pad) #else @@ -3561,6 +3859,7 @@ malloc_update_mallinfo(ar_ptr, mi) arena *ar_ptr; struct mallinfo *mi; mi->arena = ar_ptr->size; mi->ordblks = navail; + mi->smblks = mi->usmblks = mi->fsmblks = 0; /* clear unused fields */ mi->uordblks = ar_ptr->size - avail; mi->fordblks = avail; mi->hblks = n_mmaps; @@ -3648,8 +3947,9 @@ void mALLOC_STATs() #endif #if !defined(NO_THREADS) && MALLOC_DEBUG > 1 if(ar_ptr != &main_arena) { + heap_info *heap; (void)mutex_lock(&ar_ptr->mutex); - heap_info *heap = heap_for_ptr(top(ar_ptr)); + heap = heap_for_ptr(top(ar_ptr)); while(heap) { dump_heap(heap); heap = heap->prev; } (void)mutex_unlock(&ar_ptr->mutex); } @@ -3657,7 +3957,11 @@ void mALLOC_STATs() ar_ptr = ar_ptr->next; if(ar_ptr == &main_arena) break; } +#if HAVE_MMAP fprintf(stderr, "Total (incl. mmap):\n"); +#else + fprintf(stderr, "Total:\n"); +#endif fprintf(stderr, "system bytes = %10u\n", system_b); fprintf(stderr, "in use bytes = %10u\n", in_use_b); #ifdef NO_THREADS @@ -3665,6 +3969,7 @@ void mALLOC_STATs() #endif #if HAVE_MMAP fprintf(stderr, "max mmap regions = %10u\n", (unsigned int)max_n_mmaps); + fprintf(stderr, "max mmap bytes = %10lu\n", max_mmapped_mem); #endif #if THREAD_STATS fprintf(stderr, "heaps created = %10d\n", stat_n_heaps); @@ -3686,7 +3991,9 @@ struct mallinfo mALLINFo() struct mallinfo mi; Void_t *vptr = NULL; +#ifndef NO_THREADS tsd_getspecific(arena_key, vptr); +#endif malloc_update_mallinfo((vptr ? (arena*)vptr : &main_arena), &mi); return mi; } @@ -3741,19 +4048,197 @@ int mALLOPt(param_number, value) int param_number; int value; -#if defined(_LIBC) || defined(MALLOC_HOOKS) +/* Get/set state: malloc_get_state() records the current state of all + malloc variables (_except_ for the actual heap contents and `hook' + function pointers) in a system dependent, opaque data structure. + This data structure is dynamically allocated and can be free()d + after use. malloc_set_state() restores the state of all malloc + variables to the previously obtained state. 
This is especially
+   useful when using this malloc as part of a shared library, and when
+   the heap contents are saved/restored via some other method.  The
+   primary example for this is GNU Emacs with its `dumping' procedure.
+   `Hook' function pointers are never saved or restored by these
+   functions. */
+
+#define MALLOC_STATE_MAGIC   0x444c4541l
+#define MALLOC_STATE_VERSION (0*0x100l + 1l) /* major*0x100 + minor */
+
+struct malloc_state {
+  long          magic;
+  long          version;
+  mbinptr       av[NAV * 2 + 2];
+  char*         sbrk_base;
+  int           sbrked_mem_bytes;
+  unsigned long trim_threshold;
+  unsigned long top_pad;
+  unsigned int  n_mmaps_max;
+  unsigned long mmap_threshold;
+  int           check_action;
+  unsigned long max_sbrked_mem;
+  unsigned long max_total_mem;
+  unsigned int  n_mmaps;
+  unsigned int  max_n_mmaps;
+  unsigned long mmapped_mem;
+  unsigned long max_mmapped_mem;
+  int           using_malloc_checking;
+};
+
+Void_t*
+mALLOC_GET_STATe()
+{
+  struct malloc_state* ms;
+  int i;
+  mbinptr b;
+
+  ms = (struct malloc_state*)mALLOc(sizeof(*ms));
+  if (!ms)
+    return 0;
+  (void)mutex_lock(&main_arena.mutex);
+  ms->magic = MALLOC_STATE_MAGIC;
+  ms->version = MALLOC_STATE_VERSION;
+  ms->av[0] = main_arena.av[0];
+  ms->av[1] = main_arena.av[1];
+  for(i=0; i<NAV; i++) {
+    b = bin_at(&main_arena, i);
+    if(first(b) == b)
+      ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin (or initial top) */
+    else {
+      ms->av[2*i+2] = first(b);
+      ms->av[2*i+3] = last(b);
+    }
+  }
+  ms->sbrk_base = sbrk_base;
+  ms->sbrked_mem_bytes = sbrked_mem;
+  ms->trim_threshold = trim_threshold;
+  ms->top_pad = top_pad;
+  ms->n_mmaps_max = n_mmaps_max;
+  ms->mmap_threshold = mmap_threshold;
+  ms->check_action = check_action;
+  ms->max_sbrked_mem = max_sbrked_mem;
+#ifdef NO_THREADS
+  ms->max_total_mem = max_total_mem;
+#else
+  ms->max_total_mem = 0;
+#endif
+  ms->n_mmaps = n_mmaps;
+  ms->max_n_mmaps = max_n_mmaps;
+  ms->mmapped_mem = mmapped_mem;
+  ms->max_mmapped_mem = max_mmapped_mem;
+#if defined _LIBC || defined MALLOC_HOOKS
+  ms->using_malloc_checking = using_malloc_checking;
+#else
+  ms->using_malloc_checking = 0;
+#endif
+  (void)mutex_unlock(&main_arena.mutex);
+  return (Void_t*)ms;
+}
+
+int
+#if __STD_C
+mALLOC_SET_STATe(Void_t* msptr)
+#else
+mALLOC_SET_STATe(msptr) Void_t* msptr;
+#endif
+{
+  struct malloc_state* ms = (struct malloc_state*)msptr;
+  int i;
+  mbinptr b;
+
+#if defined _LIBC || defined MALLOC_HOOKS
+  disallow_malloc_check = 1;
+#endif
+  ptmalloc_init();
+  if(ms->magic != MALLOC_STATE_MAGIC) return -1;
+  /* Must fail if the major version is too high. */
+  if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
+  (void)mutex_lock(&main_arena.mutex);
+  main_arena.av[0] = ms->av[0];
+  main_arena.av[1] = ms->av[1];
+  for(i=0; i<NAV; i++) {
+    b = bin_at(&main_arena, i);
+    if(ms->av[2*i+2] == 0)
+      first(b) = last(b) = b;
+    else {
+      first(b) = ms->av[2*i+2];
+      last(b) = ms->av[2*i+3];
+      if(i > 0) {
+        /* Make sure the links to the `av'-bins in the heap are correct. */
+        first(b)->bk = b;
+        last(b)->fd = b;
+      }
+    }
+  }
+  sbrk_base = ms->sbrk_base;
+  sbrked_mem = ms->sbrked_mem_bytes;
+  trim_threshold = ms->trim_threshold;
+  top_pad = ms->top_pad;
+  n_mmaps_max = ms->n_mmaps_max;
+  mmap_threshold = ms->mmap_threshold;
+  check_action = ms->check_action;
+  max_sbrked_mem = ms->max_sbrked_mem;
+#ifdef NO_THREADS
+  max_total_mem = ms->max_total_mem;
+#endif
+  n_mmaps = ms->n_mmaps;
+  max_n_mmaps = ms->max_n_mmaps;
+  mmapped_mem = ms->mmapped_mem;
+  max_mmapped_mem = ms->max_mmapped_mem;
+  /* add version-dependent code here */
+  if (ms->version >= 1) {
+#if defined _LIBC || defined MALLOC_HOOKS
+    /* Check whether it is safe to enable malloc checking.
*/ + if (ms->using_malloc_checking && !using_malloc_checking && + !disallow_malloc_check) + __malloc_check_init (); +#endif + } + + (void)mutex_unlock(&main_arena.mutex); + return 0; +} + + + +#if defined _LIBC || defined MALLOC_HOOKS /* A simple, standard set of debugging hooks. Overhead is `only' one byte per chunk; still this will catch most cases of double frees or - overruns. */ + overruns. The goal here is to avoid obscure crashes due to invalid + usage, unlike in the MALLOC_DEBUG code. */ + +#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF ) -#define MAGICBYTE(p) ( ( ((unsigned)p >> 3) ^ ((unsigned)p >> 11)) & 0xFF ) +/* Instrument a chunk with overrun detector byte(s) and convert it + into a user pointer with requested size sz. */ + +static Void_t* +#if __STD_C +chunk2mem_check(mchunkptr p, size_t sz) +#else +chunk2mem_check(p, sz) mchunkptr p; size_t sz; +#endif +{ + unsigned char* m_ptr = (unsigned char*)chunk2mem(p); + size_t i; + + for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1); + i > sz; + i -= 0xFF) { + if(i-sz < 0x100) { + m_ptr[i] = (unsigned char)(i-sz); + break; + } + m_ptr[i] = 0xFF; + } + m_ptr[sz] = MAGICBYTE(p); + return (Void_t*)m_ptr; +} /* Convert a pointer to be free()d or realloc()ed to a valid chunk - pointer. If the provided pointer is not valid, return NULL. The - goal here is to avoid crashes, unlike in the MALLOC_DEBUG code. */ + pointer. If the provided pointer is not valid, return NULL. */ static mchunkptr +internal_function #if __STD_C mem2chunk_check(Void_t* mem) #else @@ -3761,7 +4246,8 @@ mem2chunk_check(mem) Void_t* mem; #endif { mchunkptr p; - INTERNAL_SIZE_T sz; + INTERNAL_SIZE_T sz, c; + unsigned char magic; p = mem2chunk(mem); if(!aligned_OK(p)) return NULL; @@ -3774,9 +4260,11 @@ mem2chunk_check(mem) Void_t* mem; (long)prev_chunk(p)<(long)sbrk_base || next_chunk(prev_chunk(p))!=p) )) return NULL; - if(*((unsigned char*)p + sz + (SIZE_SZ-1)) != MAGICBYTE(p)) - return NULL; - *((unsigned char*)p + sz + (SIZE_SZ-1)) ^= 0xFF; + magic = MAGICBYTE(p); + for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) { + if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL; + } + ((unsigned char*)p)[sz] ^= 0xFF; } else { unsigned long offset, page_mask = malloc_getpagesize-1; @@ -3792,41 +4280,79 @@ mem2chunk_check(mem) Void_t* mem; ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) || ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) ) return NULL; - if(*((unsigned char*)p + sz - 1) != MAGICBYTE(p)) - return NULL; - *((unsigned char*)p + sz - 1) ^= 0xFF; + magic = MAGICBYTE(p); + for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) { + if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL; + } + ((unsigned char*)p)[sz] ^= 0xFF; } return p; } +/* Check for corruption of the top chunk, and try to recover if + necessary. */ + +static int +#if __STD_C +top_check(void) +#else +top_check() +#endif +{ + mchunkptr t = top(&main_arena); + char* brk, * new_brk; + INTERNAL_SIZE_T front_misalign, sbrk_size; + unsigned long pagesz = malloc_getpagesize; + + if((char*)t + chunksize(t) == sbrk_base + sbrked_mem || + t == initial_top(&main_arena)) return 0; + + switch(check_action) { + case 1: + fprintf(stderr, "malloc: top chunk is corrupt\n"); + break; + case 2: + abort(); + } + /* Try to set up a new top chunk. 
*/ + brk = MORECORE(0); + front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK; + if (front_misalign > 0) + front_misalign = MALLOC_ALIGNMENT - front_misalign; + sbrk_size = front_misalign + top_pad + MINSIZE; + sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1)); + new_brk = (char*)(MORECORE (sbrk_size)); + if (new_brk == (char*)(MORECORE_FAILURE)) return -1; + sbrked_mem = (new_brk - sbrk_base) + sbrk_size; + + top(&main_arena) = (mchunkptr)(brk + front_misalign); + set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE); + + return 0; +} + static Void_t* #if __STD_C -malloc_check(size_t sz) +malloc_check(size_t sz, const Void_t *caller) #else -malloc_check(sz) size_t sz; +malloc_check(sz, caller) size_t sz; const Void_t *caller; #endif { mchunkptr victim; INTERNAL_SIZE_T nb = request2size(sz + 1); (void)mutex_lock(&main_arena.mutex); - victim = chunk_alloc(&main_arena, nb); + victim = (top_check() >= 0) ? chunk_alloc(&main_arena, nb) : NULL; (void)mutex_unlock(&main_arena.mutex); if(!victim) return NULL; - nb = chunksize(victim); - if(chunk_is_mmapped(victim)) - --nb; - else - nb += SIZE_SZ - 1; - *((unsigned char*)victim + nb) = MAGICBYTE(victim); - return chunk2mem(victim); + return chunk2mem_check(victim, sz); } static void #if __STD_C -free_check(Void_t* mem) +free_check(Void_t* mem, const Void_t *caller) #else -free_check(mem) Void_t* mem; +free_check(mem, caller) Void_t* mem; const Void_t *caller; #endif { mchunkptr p; @@ -3838,7 +4364,7 @@ free_check(mem) Void_t* mem; (void)mutex_unlock(&main_arena.mutex); switch(check_action) { case 1: - fprintf(stderr, "free(): invalid pointer %lx!\n", (long)(mem)); + fprintf(stderr, "free(): invalid pointer %p!\n", mem); break; case 2: abort(); @@ -3861,27 +4387,28 @@ free_check(mem) Void_t* mem; static Void_t* #if __STD_C -realloc_check(Void_t* oldmem, size_t bytes) +realloc_check(Void_t* oldmem, size_t bytes, const Void_t *caller) #else -realloc_check(oldmem, bytes) Void_t* oldmem; size_t bytes; +realloc_check(oldmem, bytes, caller) + Void_t* oldmem; size_t bytes; const Void_t *caller; #endif { mchunkptr oldp, newp; INTERNAL_SIZE_T nb, oldsize; - if (oldmem == 0) return malloc_check(bytes); + if (oldmem == 0) return malloc_check(bytes, NULL); (void)mutex_lock(&main_arena.mutex); oldp = mem2chunk_check(oldmem); if(!oldp) { (void)mutex_unlock(&main_arena.mutex); switch(check_action) { case 1: - fprintf(stderr, "realloc(): invalid pointer %lx!\n", (long)(oldmem)); + fprintf(stderr, "realloc(): invalid pointer %p!\n", oldmem); break; case 2: abort(); } - return malloc_check(bytes); + return malloc_check(bytes, NULL); } oldsize = chunksize(oldp); @@ -3897,7 +4424,7 @@ realloc_check(oldmem, bytes) Void_t* oldmem; size_t bytes; if(oldsize - SIZE_SZ >= nb) newp = oldp; /* do nothing */ else { /* Must alloc, copy, free. */ - newp = chunk_alloc(&main_arena, nb); + newp = (top_check() >= 0) ? chunk_alloc(&main_arena, nb) : NULL; if (newp) { MALLOC_COPY(chunk2mem(newp), oldmem, oldsize - 2*SIZE_SZ); munmap_chunk(oldp); @@ -3908,7 +4435,8 @@ realloc_check(oldmem, bytes) Void_t* oldmem; size_t bytes; #endif } else { #endif /* HAVE_MMAP */ - newp = chunk_realloc(&main_arena, oldp, oldsize, nb); + newp = (top_check() >= 0) ? + chunk_realloc(&main_arena, oldp, oldsize, nb) : NULL; #if 0 /* Erase freed memory. 
*/ nb = chunksize(newp); if(oldp=chunk_at_offset(newp, nb)) { @@ -3924,50 +4452,41 @@ realloc_check(oldmem, bytes) Void_t* oldmem; size_t bytes; (void)mutex_unlock(&main_arena.mutex); if(!newp) return NULL; - nb = chunksize(newp); - if(chunk_is_mmapped(newp)) - --nb; - else - nb += SIZE_SZ - 1; - *((unsigned char*)newp + nb) = MAGICBYTE(newp); - return chunk2mem(newp); + return chunk2mem_check(newp, bytes); } static Void_t* #if __STD_C -memalign_check(size_t alignment, size_t bytes) +memalign_check(size_t alignment, size_t bytes, const Void_t *caller) #else -memalign_check(alignment, bytes) size_t alignment; size_t bytes; +memalign_check(alignment, bytes, caller) + size_t alignment; size_t bytes; const Void_t *caller; #endif { INTERNAL_SIZE_T nb; mchunkptr p; - if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes); + if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL); if (alignment < MINSIZE) alignment = MINSIZE; nb = request2size(bytes+1); (void)mutex_lock(&main_arena.mutex); - p = chunk_align(&main_arena, nb, alignment); + p = (top_check() >= 0) ? chunk_align(&main_arena, nb, alignment) : NULL; (void)mutex_unlock(&main_arena.mutex); if(!p) return NULL; - nb = chunksize(p); - if(chunk_is_mmapped(p)) - --nb; - else - nb += SIZE_SZ - 1; - *((unsigned char*)p + nb) = MAGICBYTE(p); - return chunk2mem(p); + return chunk2mem_check(p, bytes); } +#ifndef NO_THREADS + /* The following hooks are used when the global initialization in ptmalloc_init() hasn't completed yet. */ static Void_t* #if __STD_C -malloc_starter(size_t sz) +malloc_starter(size_t sz, const Void_t *caller) #else -malloc_starter(sz) size_t sz; +malloc_starter(sz, caller) size_t sz; const Void_t *caller; #endif { mchunkptr victim = chunk_alloc(&main_arena, request2size(sz)); @@ -3977,9 +4496,9 @@ malloc_starter(sz) size_t sz; static void #if __STD_C -free_starter(Void_t* mem) +free_starter(Void_t* mem, const Void_t *caller) #else -free_starter(mem) Void_t* mem; +free_starter(mem, caller) Void_t* mem; const Void_t *caller; #endif { mchunkptr p; @@ -3995,7 +4514,68 @@ free_starter(mem) Void_t* mem; chunk_free(&main_arena, p); } -#endif /* defined(_LIBC) || defined(MALLOC_HOOKS) */ +/* The following hooks are used while the `atfork' handling mechanism + is active. */ + +static Void_t* +#if __STD_C +malloc_atfork (size_t sz, const Void_t *caller) +#else +malloc_atfork(sz, caller) size_t sz; const Void_t *caller; +#endif +{ + Void_t *vptr = NULL; + + tsd_getspecific(arena_key, vptr); + if(!vptr) { + mchunkptr victim = chunk_alloc(&main_arena, request2size(sz)); + return victim ? chunk2mem(victim) : 0; + } else { + /* Suspend the thread until the `atfork' handlers have completed. + By that time, the hooks will have been reset as well, so that + mALLOc() can be used again. */ + (void)mutex_lock(&list_lock); + (void)mutex_unlock(&list_lock); + return mALLOc(sz); + } +} + +static void +#if __STD_C +free_atfork(Void_t* mem, const Void_t *caller) +#else +free_atfork(mem, caller) Void_t* mem; const Void_t *caller; +#endif +{ + Void_t *vptr = NULL; + arena *ar_ptr; + mchunkptr p; /* chunk corresponding to mem */ + + if (mem == 0) /* free(0) has no effect */ + return; + + p = mem2chunk(mem); + +#if HAVE_MMAP + if (chunk_is_mmapped(p)) /* release mmapped memory. 
*/ + { + munmap_chunk(p); + return; + } +#endif + + ar_ptr = arena_for_ptr(p); + tsd_getspecific(arena_key, vptr); + if(vptr) + (void)mutex_lock(&ar_ptr->mutex); + chunk_free(ar_ptr, p); + if(vptr) + (void)mutex_unlock(&ar_ptr->mutex); +} + +#endif + +#endif /* defined _LIBC || defined MALLOC_HOOKS */ @@ -4014,12 +4594,19 @@ weak_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt) weak_alias (__malloc_stats, malloc_stats) weak_alias (__malloc_usable_size, malloc_usable_size) weak_alias (__malloc_trim, malloc_trim) +weak_alias (__malloc_get_state, malloc_get_state) +weak_alias (__malloc_set_state, malloc_set_state) #endif /* History: + V2.6.4-pt3 Thu Feb 20 1997 Wolfram Gloger (wmglo@dent.med.uni-muenchen.de) + * Added malloc_get/set_state() (mainly for use in GNU emacs), + using interface from Marcus Daniels + * All parameters are now adjustable via environment variables + V2.6.4-pt2 Sat Dec 14 1996 Wolfram Gloger (wmglo@dent.med.uni-muenchen.de) * Added debugging hooks * Fixed possible deadlock in realloc() when out of memory
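
The MAP_NORESERVE fallback introduced in the @@ -523,6 +529,14 @@ hunk above
(MAP_AUTORESRV on some SVR4 systems, 0 elsewhere) lets the allocator reserve
large spans of address space without charging swap for them.  A minimal
stand-alone sketch of the same reserve-then-commit pattern, assuming a POSIX
mmap()/mprotect() and MAP_ANONYMOUS:

#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_NORESERVE          /* same fallback chain as in the patch */
# ifdef MAP_AUTORESRV
#  define MAP_NORESERVE MAP_AUTORESRV
# else
#  define MAP_NORESERVE 0
# endif
#endif

int main(void)
{
  size_t reserve = 16UL << 20;  /* 16 MB of address space, no swap charge */
  char *p = mmap(0, reserve, PROT_NONE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (p == MAP_FAILED) { perror("mmap"); return 1; }
  /* Commit only what is actually used, one protection change at a time. */
  if (mprotect(p, 4096, PROT_READ | PROT_WRITE) != 0) {
    perror("mprotect"); return 1;
  }
  p[0] = 1;                     /* now backed by a real, writable page */
  munmap(p, reserve);
  return 0;
}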
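
The @@ -820,12 +830,12 @@ hunk drops `static' from __morecore, making the
core allocator user-replaceable.  A hypothetical replacement with the same
shape -- advance an sbrk-like break by `increment' and return the old end --
might look as follows.  my_pool and my_morecore are illustrative names only,
the NULL failure value is an assumption that must match the MORECORE_FAILURE
the allocator was configured with, and a production version would also honor
negative increments so that trimming works:

#include <stddef.h>

static char   my_pool[1 << 20];   /* illustrative fixed-size core */
static size_t my_brk;

void *my_morecore(ptrdiff_t increment)
{
  void *old_end;

  /* Shrink requests (negative increment) are simply refused here. */
  if (increment < 0 || my_brk + (size_t) increment > sizeof my_pool)
    return NULL;                  /* assumed MORECORE_FAILURE value */
  old_end = my_pool + my_brk;
  my_brk += (size_t) increment;
  return old_end;
}

/* Installed before the first allocation, e.g. at the top of main():
       __morecore = my_morecore;                                     */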
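
The reindented bin_index() macro maps a chunk size to one of 128 bins: sizes
under 512 bytes get bins 8 bytes apart, after which the spacing widens to 64,
512, 4096, 32768 and 262144 bytes.  For sz = 1000, 1000>>9 == 1, so the
second arm applies and the index is 56 + (1000>>6) = 71.  A small self-check
built on a verbatim copy of the macro:

#include <stdio.h>

#define bin_index(sz)                                                        \
(((((unsigned long)(sz)) >> 9) ==    0) ?       (((unsigned long)(sz)) >>  3)\
:((((unsigned long)(sz)) >> 9) <=    4) ?  56 + (((unsigned long)(sz)) >>  6)\
:((((unsigned long)(sz)) >> 9) <=   20) ?  91 + (((unsigned long)(sz)) >>  9)\
:((((unsigned long)(sz)) >> 9) <=   84) ? 110 + (((unsigned long)(sz)) >> 12)\
:((((unsigned long)(sz)) >> 9) <=  340) ? 119 + (((unsigned long)(sz)) >> 15)\
:((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18)\
: 126)

int main(void)
{
  unsigned long sizes[] = { 16, 100, 1000, 20000, 300000, 5000000 };
  size_t i;
  for (i = 0; i < sizeof sizes / sizeof *sizes; i++)
    printf("size %8lu -> bin %3d\n", sizes[i], (int) bin_index(sizes[i]));
  return 0;  /* e.g. "size     1000 -> bin  71", i.e. 56 + (1000 >> 6) */
}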
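
ptmalloc_lock_all(), ptmalloc_unlock_all() and ptmalloc_init_all() are
registered through thread_atfork() so that fork() can never run while another
thread holds an arena mutex -- otherwise the child would inherit a locked
mutex that no surviving thread will ever release.  The same
prepare/parent/child discipline, reduced to a single lock and the standard
pthread_atfork():

#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t heap_lock = PTHREAD_MUTEX_INITIALIZER;

static void prepare(void) { pthread_mutex_lock(&heap_lock); }   /* pre-fork  */
static void parent(void)  { pthread_mutex_unlock(&heap_lock); } /* in parent */
static void child(void)
{
  /* The child starts with a single thread; re-initialize instead of
     unlocking, mirroring ptmalloc_init_all(), in case the lock state
     was copied mid-operation. */
  pthread_mutex_init(&heap_lock, NULL);
}

int main(void)
{
  pthread_atfork(prepare, parent, child);
  if (fork() == 0)
    _exit(0);          /* child: heap_lock is usable again here */
  return 0;            /* parent: likewise */
}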
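
ptmalloc_init() now honours MALLOC_TRIM_THRESHOLD_, MALLOC_TOP_PAD_,
MALLOC_MMAP_THRESHOLD_ and MALLOC_MMAP_MAX_ from the environment, and
MALLOC_CHECK_ selects the checking hooks (MALLOC_CHECK_=1 prints a diagnostic
on a bad pointer, MALLOC_CHECK_=2 aborts).  The same four parameters remain
settable from code through the public mallopt() interface:

#include <malloc.h>

int main(void)
{
  /* Equivalent to running with
     MALLOC_TRIM_THRESHOLD_=65536 MALLOC_TOP_PAD_=0
     MALLOC_MMAP_THRESHOLD_=131072 MALLOC_MMAP_MAX_=64 in the environment. */
  mallopt(M_TRIM_THRESHOLD, 64 * 1024);  /* give top memory back past 64 kB */
  mallopt(M_TOP_PAD, 0);                 /* no extra padding on each sbrk   */
  mallopt(M_MMAP_THRESHOLD, 128 * 1024); /* mmap() requests of 128 kB and up */
  mallopt(M_MMAP_MAX, 64);               /* at most 64 mmapped regions      */
  return 0;
}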
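
Every hook variable gains a trailing `caller' argument, filled in with
__builtin_return_address(0) under GCC 2 or later, so a debugging hook can
attribute each request to its call site.  A tracing hook written against
this two-argument interface; the save/restore dance keeps fprintf()'s own
allocations from recursing into the hook:

#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>

static void *(*old_malloc_hook)(size_t, const void *);

static void *trace_malloc(size_t size, const void *caller)
{
  void *result;
  __malloc_hook = old_malloc_hook;   /* deactivate ourselves first */
  result = malloc(size);
  fprintf(stderr, "malloc(%lu) from %p returns %p\n",
          (unsigned long) size, caller, result);
  old_malloc_hook = __malloc_hook;   /* malloc may have changed the hook */
  __malloc_hook = trace_malloc;      /* reactivate */
  return result;
}

int main(void)
{
  old_malloc_hook = __malloc_hook;
  __malloc_hook = trace_malloc;
  free(malloc(42));
  __malloc_hook = old_malloc_hook;
  return 0;
}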
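
The MMAP() macro now takes addr and flags arguments and keeps its /dev/zero
fallback, caching the descriptor so only one open() is ever paid on systems
without MAP_ANONYMOUS.  The same logic as a stand-alone function:

#include <stddef.h>
#include <fcntl.h>
#include <sys/mman.h>

/* Anonymous mapping with the /dev/zero fallback of the MMAP() macro. */
static void *anon_mmap(void *addr, size_t size, int prot, int flags)
{
#ifdef MAP_ANONYMOUS
  return mmap(addr, size, prot, flags | MAP_ANONYMOUS, -1, 0);
#else
  static int dev_zero_fd = -1;    /* cached: one open() per process */
  if (dev_zero_fd < 0)
    dev_zero_fd = open("/dev/zero", O_RDWR);
  return mmap(addr, size, prot, flags, dev_zero_fd, 0);
#endif
}

int main(void)
{
  void *p = anon_mmap(0, 1 << 16, PROT_READ | PROT_WRITE, MAP_PRIVATE);
  return p == MAP_FAILED;
}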
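
new_heap() must return heaps aligned to HEAP_MAX_SIZE so that heap_for_ptr()
can recover the owning heap_info by masking low bits off any chunk pointer.
It maps twice the needed span PROT_NONE (with MAP_NORESERVE, so nothing is
charged), keeps the aligned half, and unmaps the ragged prefix and tail.
The arithmetic, with an illustrative 1 MB heap size:

#include <stdio.h>
#include <sys/mman.h>

#define HEAP_MAX (1UL << 20)  /* stand-in for HEAP_MAX_SIZE */

static void *aligned_heap(void)
{
  char *p1, *p2;
  unsigned long lead;

  p1 = mmap(0, HEAP_MAX << 1, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p1 == MAP_FAILED)
    return NULL;
  p2 = (char *) (((unsigned long) p1 + HEAP_MAX) & ~(HEAP_MAX - 1));
  lead = p2 - p1;
  if (lead > 0)
    munmap(p1, lead);                          /* drop the unaligned prefix */
  if (HEAP_MAX - lead > 0)
    munmap(p2 + HEAP_MAX, HEAP_MAX - lead);    /* and the tail */
  return p2;
}

int main(void)
{
  char *h = aligned_heap(), *ptr;
  if (h == NULL)
    return 1;
  ptr = h + 12345;   /* any address inside the heap... */
  /* ...recovers the heap base by masking, exactly as heap_for_ptr() does */
  printf("heap %p, recovered %p\n",
         (void *) h, (void *) ((unsigned long) ptr & ~(HEAP_MAX - 1)));
  return 0;
}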
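
The new weak variable __after_morecore_hook fires after every successful
MORECORE call, both when malloc_extend_top() grows the main heap and when
main_trim() shrinks it; clients such as Emacs' memory-watching code use it to
track the break.  The hook runs while malloc holds the arena lock, so it must
not allocate -- write() is safe where fprintf() may not be:

#include <stdlib.h>
#include <unistd.h>
#include <malloc.h>

static void note_morecore(void)
{
  /* Runs inside malloc, right after the break has moved; stay
     async-signal-safe and allocation-free. */
  static const char msg[] = "morecore fired\n";
  write(2, msg, sizeof msg - 1);
}

int main(void)
{
  void *p;
  __after_morecore_hook = note_morecore;
  p = malloc(64 * 1024);  /* below the mmap threshold: served via sbrk */
  free(p);
  return 0;
}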
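
malloc_get_state() snapshots malloc's bookkeeping -- bins, sbrk base, tunable
parameters and mmap counters, but neither the heap contents nor the hook
pointers -- into one opaque malloc'ed blob; malloc_set_state() restores it
once the heap bytes themselves have been brought back by some other means,
which is what GNU Emacs' dumping does.  A hypothetical round trip, assuming
the heap image is preserved externally:

#include <stdlib.h>
#include <malloc.h>

int main(void)
{
  void *state = malloc_get_state();  /* snapshot the allocator's variables */
  if (state == NULL)
    return 1;

  /* ...heap contents saved and restored by an external mechanism, as in
     a dumped Emacs executable; the blob itself lives inside that heap... */

  if (malloc_set_state(state) != 0)  /* -1: bad magic, -2: version too new */
    return 1;
  free(state);
  return 0;
}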
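
chunk2mem_check() plants MAGICBYTE(p) immediately after the user's sz bytes
and fills any remaining slack with back-distance markers -- each byte says
how far down the next marker sits, 0xFF meaning 255 further -- so that
mem2chunk_check() can start at the chunk's end and walk down to the magic
byte no matter how much slack there is.  A toy demonstration of the
encode/verify walk on a plain buffer; MAGIC stands in for MAGICBYTE(p),
which really hashes the chunk address:

#include <stdio.h>

#define MAGIC 0xA5

/* Mark a CAP-byte slot holding SZ user bytes, as chunk2mem_check() does:
   buf[SZ] holds the magic byte, each slack byte above it the distance to
   the next marker below. */
static void mark(unsigned char *buf, size_t cap, size_t sz)
{
  size_t i;
  for (i = cap - 1; i > sz; i -= 0xFF) {
    if (i - sz < 0x100) { buf[i] = (unsigned char) (i - sz); break; }
    buf[i] = 0xFF;
  }
  buf[sz] = MAGIC;
}

/* Walk down from the top as mem2chunk_check() does; returns the user size
   if the trailer is intact, or (size_t)-1 on corruption. */
static size_t check(const unsigned char *buf, size_t cap)
{
  size_t i = cap - 1;
  while (buf[i] != MAGIC) {
    unsigned char c = buf[i];
    if (c == 0 || c > i) return (size_t) -1;
    i -= c;
  }
  return i;
}

int main(void)
{
  unsigned char slot[1000];
  mark(slot, sizeof slot, 42);
  printf("intact, user size = %lu\n", (unsigned long) check(slot, sizeof slot));
  slot[42] ^= 0xFF;           /* simulate a one-byte overrun */
  if (check(slot, sizeof slot) == (size_t) -1)
    puts("overrun detected");
  return 0;
}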
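
While the fork handlers run, the hooks are swapped to malloc_atfork() and
free_atfork(): the forking thread, recognizable because ptmalloc_lock_all()
zeroed its thread-specific arena slot, keeps allocating from the
already-locked main arena, while every other thread is parked on list_lock
until ptmalloc_unlock_all() reopens it.  The parking idiom in isolation:

#include <pthread.h>

static pthread_mutex_t gate = PTHREAD_MUTEX_INITIALIZER;

/* What every non-forking thread effectively does inside malloc_atfork():
   block until the thread driving fork() releases the gate, then proceed.
   Nothing is protected here; the lock/unlock pair only delays the caller
   until the fork is over. */
static void wait_until_fork_done(void)
{
  pthread_mutex_lock(&gate);
  pthread_mutex_unlock(&gate);
}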