/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001, 2002 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
#ifndef weak_variable
#ifndef _LIBC
#define weak_variable /**/
#else
/* In GNU libc we want the hook variables to be weak definitions to
   avoid a problem with Emacs. */
#define weak_variable weak_function
#endif
#endif
#ifndef DEFAULT_CHECK_ACTION
#define DEFAULT_CHECK_ACTION 1
#endif

/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2). */
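
#if 0
/* Illustrative sketch only (hypothetical fragment, not compiled): how the
   checking routines below react once a corrupt pointer has been detected,
   assuming the bit-mask style tests they use on check_action.  */
if (check_action & 1)
  fprintf (stderr, "malloc: corrupt pointer detected\n");   /* report  */
if (check_action & 2)
  abort ();                                                 /* give up */
#endif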
/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */

static Void_t*
#if __STD_C
malloc_hook_ini(size_t sz, const __malloc_ptr_t caller)
#else
malloc_hook_ini(sz, caller)
     size_t sz; const __malloc_ptr_t caller;
#endif
{
  __malloc_hook = NULL;
  ptmalloc_init();
  return public_mALLOc(sz);
}
static Void_t*
#if __STD_C
realloc_hook_ini(Void_t* ptr, size_t sz, const __malloc_ptr_t caller)
#else
realloc_hook_ini(ptr, sz, caller)
     Void_t* ptr; size_t sz; const __malloc_ptr_t caller;
#endif
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init();
  return public_rEALLOc(ptr, sz);
}
static Void_t*
#if __STD_C
memalign_hook_ini(size_t alignment, size_t sz, const __malloc_ptr_t caller)
#else
memalign_hook_ini(alignment, sz, caller)
     size_t alignment; size_t sz; const __malloc_ptr_t caller;
#endif
{
  __memalign_hook = NULL;
  ptmalloc_init();
  return public_mEMALIGn(alignment, sz);
}
void weak_variable (*__malloc_initialize_hook) __MALLOC_P ((void)) = NULL;
void weak_variable (*__free_hook) __MALLOC_P ((__malloc_ptr_t __ptr,
                                               const __malloc_ptr_t)) = NULL;
__malloc_ptr_t weak_variable (*__malloc_hook)
     __MALLOC_P ((size_t __size, const __malloc_ptr_t)) = malloc_hook_ini;
__malloc_ptr_t weak_variable (*__realloc_hook)
     __MALLOC_P ((__malloc_ptr_t __ptr, size_t __size, const __malloc_ptr_t))
     = realloc_hook_ini;
__malloc_ptr_t weak_variable (*__memalign_hook)
     __MALLOC_P ((size_t __alignment, size_t __size, const __malloc_ptr_t))
     = memalign_hook_ini;
void weak_variable (*__after_morecore_hook) __MALLOC_P ((void)) = NULL;
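
#if 0
/* Usage sketch only (user code, never compiled as part of this file):
   how an application can interpose the weak hook variables defined above,
   in the style of the glibc manual example.  The names my_init and
   my_malloc_hook are hypothetical; the hook is temporarily restored inside
   the wrapper so that the recursive call to malloc() does not loop.  */
#include <stdio.h>
#include <malloc.h>

static void *(*old_malloc_hook) (size_t, const void *);

static void *
my_malloc_hook (size_t size, const void *caller)
{
  void *result;
  __malloc_hook = old_malloc_hook;     /* uninstall ourselves            */
  result = malloc (size);              /* do the real allocation         */
  old_malloc_hook = __malloc_hook;     /* malloc may have reset the hook */
  fprintf (stderr, "malloc (%u) returns %p\n", (unsigned) size, result);
  __malloc_hook = my_malloc_hook;      /* re-install ourselves           */
  return result;
}

static void
my_init (void)
{
  old_malloc_hook = __malloc_hook;
  __malloc_hook = my_malloc_hook;
}
void (*__malloc_initialize_hook) (void) = my_init;
#endif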
static int check_action = DEFAULT_CHECK_ACTION;

/* Whether we are using malloc checking. */
static int using_malloc_checking;

/* A flag that is set by malloc_set_state, to signal that malloc checking
   must not be enabled on the request from the user (via the MALLOC_CHECK_
   environment variable).  It is reset by __malloc_check_init to tell
   malloc_set_state that the user has requested malloc checking.

   The purpose of this flag is to make sure that malloc checking is not
   enabled when the heap to be restored was constructed without malloc
   checking, and thus does not contain the required magic bytes.
   Otherwise the heap would be corrupted by calls to free and realloc.  If
   it turns out that the heap was created with malloc checking and the
   user has requested it, malloc_set_state just calls __malloc_check_init
   again to enable it.  On the other hand, reusing such a heap without
   further malloc checking is safe. */
static int disallow_malloc_check;
/* Activate a standard set of debugging hooks. */
void
__malloc_check_init()
{
  if (disallow_malloc_check) {
    disallow_malloc_check = 0;
    return;
  }
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
  if(check_action & 1)
    fprintf(stderr, "malloc: using debugging hooks\n");
}
/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
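
#if 0
/* Usage sketch only (hypothetical user code, not compiled here): with the
   checking hooks installed, a one-byte overrun of a request is caught on
   free, because the MAGICBYTE() value that mem2mem_check() stores just past
   the requested size no longer matches, so mem2chunk_check() returns NULL
   and free_check() reports an invalid pointer instead of corrupting the
   heap.  What happens next depends on check_action.  */
char *q = malloc (16);
q[16] = 'X';            /* clobber the magic byte after the request    */
free (q);               /* -> "free(): invalid pointer ..." diagnostic */
#endif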
/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size sz. */

static Void_t*
#if __STD_C
mem2mem_check(Void_t *ptr, size_t sz)
#else
mem2mem_check(ptr, sz) Void_t *ptr; size_t sz;
#endif
{
  mchunkptr p;
  unsigned char* m_ptr = (unsigned char*)BOUNDED_N(ptr, sz);
  size_t i;

  if (!ptr)
    return ptr;
  p = mem2chunk(ptr);
  for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
      i > sz;
      i -= 0xFF) {
    if(i-sz < 0x100) {
      m_ptr[i] = (unsigned char)(i-sz);
      break;
    }
    m_ptr[i] = 0xFF;
  }
  m_ptr[sz] = MAGICBYTE(p);
  return (Void_t*)m_ptr;
}
/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL. */

static mchunkptr
#if __STD_C
mem2chunk_check(Void_t* mem)
#else
mem2chunk_check(mem) Void_t* mem;
#endif
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  p = mem2chunk(mem);
  if(!aligned_OK(p)) return NULL;
  if( (char*)p>=mp_.sbrk_base &&
      (char*)p<(mp_.sbrk_base+main_arena.system_mem) ) {
    /* Must be a chunk in conventional heap memory. */
    if(chunk_is_mmapped(p) ||
       ( (sz = chunksize(p)),
         ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) ) ||
       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
                            (long)prev_chunk(p)<(long)mp_.sbrk_base ||
                            next_chunk(prev_chunk(p))!=p) ))
      return NULL;
    magic = MAGICBYTE(p);
    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
    ((unsigned char*)p)[sz] ^= 0xFF;
  } else {
    unsigned long offset, page_mask = malloc_getpagesize-1;

    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
       alignment relative to the beginning of a page.  Check this
       first. */
    offset = (unsigned long)mem & page_mask;
    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
        offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
        offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
        offset<0x2000) ||
       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
      return NULL;
    magic = MAGICBYTE(p);
    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
    ((unsigned char*)p)[sz] ^= 0xFF;
  }
  return p;
}
/* Check for corruption of the top chunk, and try to recover if
   necessary. */

static int
top_check(void)
{
  mchunkptr t = top(&main_arena);
  char* brk, * new_brk;
  INTERNAL_SIZE_T front_misalign, sbrk_size;
  unsigned long pagesz = malloc_getpagesize;

  if((char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem ||
     t == initial_top(&main_arena)) return 0;

  fprintf(stderr, "malloc: top chunk is corrupt\n");

  /* Try to set up a new top chunk. */
  brk = (char*)(MORECORE(0));
  front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
  if (front_misalign > 0)
    front_misalign = MALLOC_ALIGNMENT - front_misalign;
  sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
  sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
  new_brk = (char*)(MORECORE (sbrk_size));
  if (new_brk == (char*)(MORECORE_FAILURE)) return -1;
  /* Call the `morecore' hook if necessary. */
  if (__after_morecore_hook)
    (*__after_morecore_hook) ();
  main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;

  top(&main_arena) = (mchunkptr)(brk + front_misalign);
  set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);

  return 0;
}
static Void_t*
#if __STD_C
malloc_check(size_t sz, const Void_t *caller)
#else
malloc_check(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  Void_t *victim;

  (void)mutex_lock(&main_arena.mutex);
  victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(victim, sz);
}
static void
#if __STD_C
free_check(Void_t* mem, const Void_t *caller)
#else
free_check(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  mchunkptr p;

  if(!mem) return;
  (void)mutex_lock(&main_arena.mutex);
  p = mem2chunk_check(mem);
  if(!p) {
    (void)mutex_unlock(&main_arena.mutex);
    fprintf(stderr, "free(): invalid pointer %p!\n", mem);
    return;
  }
  if (chunk_is_mmapped(p)) {
    (void)mutex_unlock(&main_arena.mutex);
    munmap_chunk(p);
    return;
  }
#if 0 /* Erase freed memory. */
  memset(mem, 0, chunksize(p) - (SIZE_SZ+1));
#endif
  _int_free(&main_arena, mem);
  (void)mutex_unlock(&main_arena.mutex);
}
static Void_t*
#if __STD_C
realloc_check(Void_t* oldmem, size_t bytes, const Void_t *caller)
#else
realloc_check(oldmem, bytes, caller)
     Void_t* oldmem; size_t bytes; const Void_t *caller;
#endif
{
  mchunkptr oldp;
  INTERNAL_SIZE_T nb, oldsize;
  Void_t* newmem = 0;

  if (oldmem == 0) return malloc_check(bytes, NULL);
  (void)mutex_lock(&main_arena.mutex);
  oldp = mem2chunk_check(oldmem);
  (void)mutex_unlock(&main_arena.mutex);
  if(!oldp) {
    fprintf(stderr, "realloc(): invalid pointer %p!\n", oldmem);
    return malloc_check(bytes, NULL);
  }
  oldsize = chunksize(oldp);

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);

#if HAVE_MMAP
  if (chunk_is_mmapped(oldp)) {
#if HAVE_MREMAP
    mchunkptr newp = mremap_chunk(oldp, nb);
    if(newp)
      newmem = chunk2mem(newp);
    else
#endif
    {
      /* Note the extra SIZE_SZ overhead. */
      if(oldsize - SIZE_SZ >= nb)
        newmem = oldmem; /* do nothing */
      else {
        /* Must alloc, copy, free. */
        if (top_check() >= 0)
          newmem = _int_malloc(&main_arena, bytes+1);
        if (newmem) {
          MALLOC_COPY(BOUNDED_N(newmem, bytes+1), oldmem, oldsize - 2*SIZE_SZ);
          munmap_chunk(oldp);
        }
      }
    }
  } else {
#endif /* HAVE_MMAP */
    if (top_check() >= 0)
      newmem = _int_realloc(&main_arena, oldmem, bytes+1);
#if 0 /* Erase freed memory. */
    newp = mem2chunk(newmem);
    nb = chunksize(newp);
    if(oldp<newp || oldp>=chunk_at_offset(newp, nb)) {
      memset((char*)oldmem + 2*sizeof(mbinptr), 0,
             oldsize - (2*sizeof(mbinptr)+2*SIZE_SZ+1));
    } else if(nb > oldsize+SIZE_SZ) {
      memset((char*)BOUNDED_N(chunk2mem(newp), bytes) + oldsize,
             0, nb - (oldsize+SIZE_SZ));
    }
#endif
#if HAVE_MMAP
  }
#endif
  (void)mutex_unlock(&main_arena.mutex);

  return mem2mem_check(newmem, bytes);
}
static Void_t*
#if __STD_C
memalign_check(size_t alignment, size_t bytes, const Void_t *caller)
#else
memalign_check(alignment, bytes, caller)
     size_t alignment; size_t bytes; const Void_t *caller;
#endif
{
  INTERNAL_SIZE_T nb;
  Void_t* mem;

  if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
  if (alignment < MINSIZE) alignment = MINSIZE;

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);
  mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
    NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(mem, bytes);
}
#ifndef NO_THREADS

/* The following hooks are used when the global initialization in
   ptmalloc_init() hasn't completed yet. */

static Void_t*
#if __STD_C
malloc_starter(size_t sz, const Void_t *caller)
#else
malloc_starter(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  Void_t* victim;

  victim = _int_malloc(&main_arena, sz);

  return victim ? BOUNDED_N(victim, sz) : 0;
}

static void
#if __STD_C
free_starter(Void_t* mem, const Void_t *caller)
#else
free_starter(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  mchunkptr p;

  if(!mem) return;
  p = mem2chunk(mem);
  if (chunk_is_mmapped(p)) {
    munmap_chunk(p);
    return;
  }
  _int_free(&main_arena, mem);
}

#endif /* NO_THREADS */
/* Get/set state: malloc_get_state() records the current state of all
   malloc variables (_except_ for the actual heap contents and `hook'
   function pointers) in a system dependent, opaque data structure.
   This data structure is dynamically allocated and can be free()d
   after use.  malloc_set_state() restores the state of all malloc
   variables to the previously obtained state.  This is especially
   useful when using this malloc as part of a shared library, and when
   the heap contents are saved/restored via some other method.  The
   primary example for this is GNU Emacs with its `dumping' procedure.
   `Hook' function pointers are never saved or restored by these
   functions, with two exceptions: If malloc checking was in use when
   malloc_get_state() was called, then malloc_set_state() calls
   __malloc_check_init() if possible; if malloc checking was not in
   use in the recorded state but the user requested malloc checking,
   then the hooks are reset to 0. */
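
#if 0
/* Usage sketch only (application code, not part of this file; assumes the
   public names malloc_get_state()/malloc_set_state() that these entry
   points are exported under): a program that dumps its heap and reloads it
   later -- the Emacs case mentioned above -- saves the opaque blob together
   with the heap image and hands it back once the heap has been restored. */
void *state = malloc_get_state ();   /* snapshot of the malloc variables */
/* ... write the heap image and `state' out, exit, restart the program,
   map the heap image back in ... */
if (malloc_set_state (state) != 0)   /* returns 0 on success */
  abort ();
free (state);                        /* the blob itself was malloc()ed */
#endif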
#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0*0x100l + 2l) /* major*0x100 + minor */
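
#if 0
/* Worked example (illustrative only) of the encoding above and of the
   version check in public_sET_STATe() below: only the major part (bits 8
   and up) decides compatibility, so a state blob recorded by a hypothetical
   version 1.0 would be refused by this version 0.2 of the format.  */
long recorded = 1*0x100l + 0l;                      /* hypothetical "1.0" */
if((recorded & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
  /* 0x100 > 0x000, so malloc_set_state() would return -2 here */ ;
#endif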
struct malloc_save_state {
  long          magic;
  long          version;
  mbinptr       av[NBINS * 2 + 2];
  char*         sbrk_base;
  int           sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int  n_mmaps_max;
  unsigned long mmap_threshold;
  int           check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;
  unsigned int  n_mmaps;
  unsigned int  max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int           using_malloc_checking;
};
Void_t*
public_gET_STATe(void)
{
  struct malloc_save_state* ms;
  int i;
  mbinptr b;

  ms = (struct malloc_save_state*)public_mALLOc(sizeof(*ms));
  if (!ms)
    return 0;
  (void)mutex_lock(&main_arena.mutex);
  malloc_consolidate(&main_arena);
  ms->magic = MALLOC_STATE_MAGIC;
  ms->version = MALLOC_STATE_VERSION;
  ms->av[1] = 0; /* used to be binblocks, now no longer used */
  ms->av[2] = top(&main_arena);
  ms->av[3] = 0; /* used to be undefined */
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
      ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
      ms->av[2*i+2] = first(b);
      ms->av[2*i+3] = last(b);
  }
  ms->sbrk_base = mp_.sbrk_base;
  ms->sbrked_mem_bytes = main_arena.system_mem;
  ms->trim_threshold = mp_.trim_threshold;
  ms->top_pad = mp_.top_pad;
  ms->n_mmaps_max = mp_.n_mmaps_max;
  ms->mmap_threshold = mp_.mmap_threshold;
  ms->check_action = check_action;
  ms->max_sbrked_mem = main_arena.max_system_mem;
#ifdef NO_THREADS
  ms->max_total_mem = max_total_mem;
#else
  ms->max_total_mem = 0;
#endif
  ms->n_mmaps = mp_.n_mmaps;
  ms->max_n_mmaps = mp_.max_n_mmaps;
  ms->mmapped_mem = mp_.mmapped_mem;
  ms->max_mmapped_mem = mp_.max_mmapped_mem;
  ms->using_malloc_checking = using_malloc_checking;
  (void)mutex_unlock(&main_arena.mutex);
  return (Void_t*)ms;
}
int
public_sET_STATe(Void_t* msptr)
{
  struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
  size_t i;
  mbinptr b;

  disallow_malloc_check = 1;
  ptmalloc_init();
  if(ms->magic != MALLOC_STATE_MAGIC) return -1;
  /* Must fail if the major version is too high. */
  if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
  (void)mutex_lock(&main_arena.mutex);
  /* There are no fastchunks. */
  clear_fastchunks(&main_arena);
  set_max_fast(&main_arena, DEFAULT_MXFAST);
  for (i=0; i<NFASTBINS; ++i)
    main_arena.fastbins[i] = 0;
  for (i=0; i<BINMAPSIZE; ++i)
    main_arena.binmap[i] = 0;
  top(&main_arena) = ms->av[2];
  main_arena.last_remainder = 0;
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(ms->av[2*i+2] == 0) {
      assert(ms->av[2*i+3] == 0);
      first(b) = last(b) = b;
    } else {
      if(i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
                          largebin_index(chunksize(ms->av[2*i+3]))==i)) {
        first(b) = ms->av[2*i+2];
        last(b) = ms->av[2*i+3];
        /* Make sure the links to the bins within the heap are correct. */
        first(b)->bk = b;
        last(b)->fd = b;
        /* Set bit in binblocks. */
        mark_bin(&main_arena, i);
      } else {
        /* Oops, index computation from chunksize must have changed.
           Link the whole list into unsorted_chunks. */
        first(b) = last(b) = b;
        b = unsorted_chunks(&main_arena);
        ms->av[2*i+2]->bk = b;
        ms->av[2*i+3]->fd = b->fd;
        b->fd->bk = ms->av[2*i+3];
        b->fd = ms->av[2*i+2];
      }
    }
  }
  mp_.sbrk_base = ms->sbrk_base;
  main_arena.system_mem = ms->sbrked_mem_bytes;
  mp_.trim_threshold = ms->trim_threshold;
  mp_.top_pad = ms->top_pad;
  mp_.n_mmaps_max = ms->n_mmaps_max;
  mp_.mmap_threshold = ms->mmap_threshold;
  check_action = ms->check_action;
  main_arena.max_system_mem = ms->max_sbrked_mem;
  mp_.max_total_mem = ms->max_total_mem;
  mp_.n_mmaps = ms->n_mmaps;
  mp_.max_n_mmaps = ms->max_n_mmaps;
  mp_.mmapped_mem = ms->mmapped_mem;
  mp_.max_mmapped_mem = ms->max_mmapped_mem;
  /* add version-dependent code here */
  if (ms->version >= 1) {
    /* Check whether it is safe to enable malloc checking, or whether
       it is necessary to disable it. */
    if (ms->using_malloc_checking && !using_malloc_checking &&
        !disallow_malloc_check)
      __malloc_check_init ();
    else if (!ms->using_malloc_checking && using_malloc_checking) {
      /* Must arrange that malloc checking is not enabled. */
      __malloc_hook = NULL;
      __free_hook = NULL;
      __realloc_hook = NULL;
      __memalign_hook = NULL;
      using_malloc_checking = 0;
    }
  }
  check_malloc_state(&main_arena);

  (void)mutex_unlock(&main_arena.mutex);
  return 0;
}