/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#define weak_variable /**/

#ifndef DEFAULT_CHECK_ACTION
#define DEFAULT_CHECK_ACTION 1
#endif

/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2). */

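/* Note: in the GNU C library the value of check_action is normally chosen
   via the MALLOC_CHECK_ environment variable; for example, running a
   program with MALLOC_CHECK_=2 in the environment makes a detected
   corruption call abort() once the checking hooks below are installed. */
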
/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */

static Void_t*
malloc_hook_ini(size_t sz, const __malloc_ptr_t caller)
{
  __malloc_hook = NULL;
  ptmalloc_init();
  return public_mALLOc(sz);
}

static Void_t*
realloc_hook_ini(Void_t* ptr, size_t sz, const __malloc_ptr_t caller)
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init();
  return public_rEALLOc(ptr, sz);
}

static Void_t*
memalign_hook_ini(size_t alignment, size_t sz, const __malloc_ptr_t caller)
{
  __memalign_hook = NULL;
  ptmalloc_init();
  return public_mEMALIGn(alignment, sz);
}

void weak_variable (*__malloc_initialize_hook) __MALLOC_P ((void)) = NULL;
void weak_variable (*__free_hook) __MALLOC_P ((__malloc_ptr_t __ptr,
                                               const __malloc_ptr_t)) = NULL;
__malloc_ptr_t weak_variable (*__malloc_hook)
 __MALLOC_P ((size_t __size, const __malloc_ptr_t)) = malloc_hook_ini;
__malloc_ptr_t weak_variable (*__realloc_hook)
 __MALLOC_P ((__malloc_ptr_t __ptr, size_t __size, const __malloc_ptr_t))
     = realloc_hook_ini;
__malloc_ptr_t weak_variable (*__memalign_hook)
 __MALLOC_P ((size_t __alignment, size_t __size, const __malloc_ptr_t))
     = memalign_hook_ini;
void weak_variable (*__after_morecore_hook) __MALLOC_P ((void)) = NULL;

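/* Illustrative sketch (not part of this file; my_malloc_hook and
   old_malloc_hook are hypothetical names): how an application can
   interpose on allocation through the weak hook pointers declared above.
   The hook is temporarily uninstalled around the recursive malloc() call
   so the trace does not recurse. */
#if 0
static __malloc_ptr_t (*old_malloc_hook) __MALLOC_P ((size_t, const __malloc_ptr_t));

static __malloc_ptr_t
my_malloc_hook (size_t size, const __malloc_ptr_t caller)
{
  __malloc_ptr_t result;
  __malloc_hook = old_malloc_hook;   /* uninstall ourselves */
  result = malloc (size);
  fprintf (stderr, "malloc(%u) = %p\n", (unsigned int) size, result);
  old_malloc_hook = __malloc_hook;   /* malloc may have changed the hook */
  __malloc_hook = my_malloc_hook;    /* re-install ourselves */
  return result;
}
#endif
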
static int check_action = DEFAULT_CHECK_ACTION;

/* Whether we are using malloc checking.  */
static int using_malloc_checking;

/* A flag that is set by malloc_set_state, to signal that malloc checking
   must not be enabled on the request from the user (via the MALLOC_CHECK_
   environment variable).  It is reset by __malloc_check_init to tell
   malloc_set_state that the user has requested malloc checking.

   The purpose of this flag is to make sure that malloc checking is not
   enabled when the heap to be restored was constructed without malloc
   checking, and thus does not contain the required magic bytes.
   Otherwise the heap would be corrupted by calls to free and realloc.  If
   it turns out that the heap was created with malloc checking and the
   user has requested it, malloc_set_state just calls __malloc_check_init
   again to enable it.  On the other hand, reusing such a heap without
   further malloc checking is safe.  */
static int disallow_malloc_check;

/* Activate a standard set of debugging hooks. */
void
__malloc_check_init()
{
  if (disallow_malloc_check) {
    disallow_malloc_check = 0;
    return;
  }
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
  if(check_action & 1)
    fprintf(stderr, "malloc: using debugging hooks\n");
}

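/* For reference, a rough sketch (assumed, not part of this file) of how
   these hooks typically get activated: ptmalloc_init() checks the
   MALLOC_CHECK_ environment variable and, when it is set, installs the
   checking hooks along these lines. */
#if 0
  const char* s = getenv("MALLOC_CHECK_");
  if(s && s[0]) {
    mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
    __malloc_check_init();
  }
#endif
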
/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )

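/* Illustrative sketch (hypothetical helper, not used by the real checks):
   mem2mem_check() below stores MAGICBYTE(p) in the byte just past the
   requested size, so a minimal trailing-byte test for a pointer whose
   request size is known would look like this.  The full validation,
   including the padding bytes beyond the first, is done by
   mem2chunk_check(). */
#if 0
static int
magic_byte_intact(Void_t* mem, size_t sz)
{
  mchunkptr p = mem2chunk(mem);
  return ((unsigned char*)mem)[sz] == MAGICBYTE(p);
}
#endif
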
/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size sz. */

static Void_t*
mem2mem_check(Void_t *ptr, size_t sz)
{
  mchunkptr p;
  unsigned char* m_ptr = (unsigned char*)BOUNDED_N(ptr, sz);
  size_t i;

  if (!ptr) return ptr;
  p = mem2chunk(ptr);
  for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
      i > sz; i -= 0xFF) {
    if(i-sz < 0x100) { m_ptr[i] = (unsigned char)(i-sz); break; }
    m_ptr[i] = 0xFF;
  }
  m_ptr[sz] = MAGICBYTE(p);
  return (Void_t*)m_ptr;
}

/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL. */

static mchunkptr
mem2chunk_check(Void_t* mem)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  p = mem2chunk(mem);
  if(!aligned_OK(p)) return NULL;
  if( (char*)p>=mp_.sbrk_base &&
      (char*)p<(mp_.sbrk_base+main_arena.system_mem) ) {
    /* Must be a chunk in conventional heap memory. */
    if(chunk_is_mmapped(p) ||
       ( (sz = chunksize(p)),
         ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) ) ||
       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
                            (long)prev_chunk(p)<(long)mp_.sbrk_base ||
                            next_chunk(prev_chunk(p))!=p) ))
      return NULL;
    magic = MAGICBYTE(p);
    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
    ((unsigned char*)p)[sz] ^= 0xFF;
  } else {
    unsigned long offset, page_mask = malloc_getpagesize-1;

    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
       alignment relative to the beginning of a page.  Check this
       first. */
    offset = (unsigned long)mem & page_mask;
    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
        offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
        offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
        offset<0x2000) ||
       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
      return NULL;
    magic = MAGICBYTE(p);
    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
    ((unsigned char*)p)[sz] ^= 0xFF;
  }
  return p;
}

/* Check for corruption of the top chunk, and try to recover if
   necessary. */

static int
top_check(void)
{
  mchunkptr t = top(&main_arena);
  char* brk, * new_brk;
  INTERNAL_SIZE_T front_misalign, sbrk_size;
  unsigned long pagesz = malloc_getpagesize;

  if((char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem ||
     t == initial_top(&main_arena)) return 0;

  if(check_action & 1)
    fprintf(stderr, "malloc: top chunk is corrupt\n");
  if(check_action & 2)
    abort();

  /* Try to set up a new top chunk. */
  brk = (char*)(MORECORE(0));
  front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
  if (front_misalign > 0)
    front_misalign = MALLOC_ALIGNMENT - front_misalign;
  sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
  sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
  new_brk = (char*)(MORECORE (sbrk_size));
  if (new_brk == (char*)(MORECORE_FAILURE)) return -1;
  /* Call the `morecore' hook if necessary.  */
  if (__after_morecore_hook)
    (*__after_morecore_hook) ();
  main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;

  top(&main_arena) = (mchunkptr)(brk + front_misalign);
  set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);

  return 0;
}

static Void_t*
malloc_check(size_t sz, const Void_t *caller)
{
  Void_t *victim;

  (void)mutex_lock(&main_arena.mutex);
  victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(victim, sz);
}

static void
free_check(Void_t* mem, const Void_t *caller)
{
  mchunkptr p;

  if(!mem) return;
  (void)mutex_lock(&main_arena.mutex);
  p = mem2chunk_check(mem);
  if(!p) {
    (void)mutex_unlock(&main_arena.mutex);
    if(check_action & 1)
      fprintf(stderr, "free(): invalid pointer %p!\n", mem);
    if(check_action & 2)
      abort();
    return;
  }
  if (chunk_is_mmapped(p)) {
    (void)mutex_unlock(&main_arena.mutex);
    munmap_chunk(p);
    return;
  }
#if 0 /* Erase freed memory. */
  memset(mem, 0, chunksize(p) - (SIZE_SZ+1));
#endif
  _int_free(&main_arena, mem);
  (void)mutex_unlock(&main_arena.mutex);
}

static Void_t*
realloc_check(Void_t* oldmem, size_t bytes, const Void_t *caller)
{
  mchunkptr oldp, newp = 0;
  INTERNAL_SIZE_T nb, oldsize;
  Void_t* newmem = 0;

  if (oldmem == 0) return malloc_check(bytes, NULL);
  (void)mutex_lock(&main_arena.mutex);
  oldp = mem2chunk_check(oldmem);
  (void)mutex_unlock(&main_arena.mutex);
  if(!oldp) {
    if(check_action & 1)
      fprintf(stderr, "realloc(): invalid pointer %p!\n", oldmem);
    if(check_action & 2)
      abort();
    return malloc_check(bytes, NULL);
  }
  oldsize = chunksize(oldp);

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);

#if HAVE_MMAP
  if (chunk_is_mmapped(oldp)) {
#if HAVE_MREMAP
    newp = mremap_chunk(oldp, nb);
    if(newp)
      newmem = chunk2mem(newp);
    else
#endif
    {
      /* Note the extra SIZE_SZ overhead. */
      if(oldsize - SIZE_SZ >= nb)
        newmem = oldmem; /* do nothing */
      else {
        /* Must alloc, copy, free. */
        if (top_check() >= 0)
          newmem = _int_malloc(&main_arena, bytes+1);
        if (newmem) {
          MALLOC_COPY(BOUNDED_N(newmem, bytes+1), oldmem, oldsize - 2*SIZE_SZ);
          munmap_chunk(oldp);
        }
      }
    }
  } else {
#endif /* HAVE_MMAP */
    if (top_check() >= 0)
      newmem = _int_realloc(&main_arena, oldmem, bytes+1);
#if 0 /* Erase freed memory. */
    newp = mem2chunk(newmem);
    nb = chunksize(newp);
    if(oldp<newp || oldp>=chunk_at_offset(newp, nb)) {
      memset((char*)oldmem + 2*sizeof(mbinptr), 0,
             oldsize - (2*sizeof(mbinptr)+2*SIZE_SZ+1));
    } else if(nb > oldsize+SIZE_SZ) {
      memset((char*)BOUNDED_N(chunk2mem(newp), bytes) + oldsize,
             0, nb - (oldsize+SIZE_SZ));
    }
#endif
#if HAVE_MMAP
  }
#endif
  (void)mutex_unlock(&main_arena.mutex);

  return mem2mem_check(newmem, bytes);
}

static Void_t*
memalign_check(size_t alignment, size_t bytes, const Void_t *caller)
{
  INTERNAL_SIZE_T nb;
  Void_t* mem;

  if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
  if (alignment < MINSIZE) alignment = MINSIZE;

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);
  mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
    NULL;
  (void)mutex_unlock(&main_arena.mutex);
  return mem2mem_check(mem, bytes);
}

#ifndef NO_THREADS

/* The following hooks are used when the global initialization in
   ptmalloc_init() hasn't completed yet. */

static Void_t*
malloc_starter(size_t sz, const Void_t *caller)
{
  Void_t* victim = _int_malloc(&main_arena, sz);

  return victim ? BOUNDED_N(victim, sz) : 0;
}

static void
free_starter(Void_t* mem, const Void_t *caller)
{
  mchunkptr p;

  if(!mem) return;
  p = mem2chunk(mem);
  if (chunk_is_mmapped(p)) { munmap_chunk(p); return; }
  _int_free(&main_arena, mem);
}

#endif /* NO_THREADS */

/* Get/set state: malloc_get_state() records the current state of all
   malloc variables (_except_ for the actual heap contents and `hook'
   function pointers) in a system dependent, opaque data structure.
   This data structure is dynamically allocated and can be free()d
   after use.  malloc_set_state() restores the state of all malloc
   variables to the previously obtained state.  This is especially
   useful when using this malloc as part of a shared library, and when
   the heap contents are saved/restored via some other method.  The
   primary example for this is GNU Emacs with its `dumping' procedure.
   `Hook' function pointers are never saved or restored by these
   functions, with two exceptions: If malloc checking was in use when
   malloc_get_state() was called, then malloc_set_state() calls
   __malloc_check_init() if possible; if malloc checking was not in
   use in the recorded state but the user requested malloc checking,
   then the hooks are reset to 0. */

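/* Illustrative sketch of application-side use (hypothetical code; the heap
   contents themselves must be preserved by some other means, as described
   above). */
#if 0
  Void_t *state = malloc_get_state();   /* opaque, malloc()ed snapshot */
  /* ... write the heap and `state' out as part of the dump ... */
  /* Later, in the restored image, once the heap bytes are back: */
  if (malloc_set_state(state) != 0)
    abort();   /* magic mismatch or major version too new */
  free(state);
#endif
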
#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0*0x100l + 2l) /* major*0x100 + minor */

struct malloc_save_state {
  long          magic;
  long          version;
  mbinptr       av[NBINS * 2 + 2];
  char*         sbrk_base;
  int           sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int  n_mmaps_max;
  unsigned long mmap_threshold;
  int           check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;
  unsigned int  n_mmaps;
  unsigned int  max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int           using_malloc_checking;
};

Void_t*
public_gET_STATe(void)
{
  struct malloc_save_state* ms;
  size_t i;
  mbinptr b;

  ms = (struct malloc_save_state*)public_mALLOc(sizeof(*ms));
  if (!ms)
    return 0;
  (void)mutex_lock(&main_arena.mutex);
  malloc_consolidate(&main_arena);
  ms->magic = MALLOC_STATE_MAGIC;
  ms->version = MALLOC_STATE_VERSION;
  ms->av[0] = 0;
  ms->av[1] = 0; /* used to be binblocks, now no longer used */
  ms->av[2] = top(&main_arena);
  ms->av[3] = 0; /* used to be undefined */
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(first(b) == b)
      ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
    else {
      ms->av[2*i+2] = first(b);
      ms->av[2*i+3] = last(b);
    }
  }
  ms->sbrk_base = mp_.sbrk_base;
  ms->sbrked_mem_bytes = main_arena.system_mem;
  ms->trim_threshold = mp_.trim_threshold;
  ms->top_pad = mp_.top_pad;
  ms->n_mmaps_max = mp_.n_mmaps_max;
  ms->mmap_threshold = mp_.mmap_threshold;
  ms->check_action = check_action;
  ms->max_sbrked_mem = main_arena.max_system_mem;
#ifdef NO_THREADS
  ms->max_total_mem = max_total_mem;
#else
  ms->max_total_mem = 0;
#endif
  ms->n_mmaps = mp_.n_mmaps;
  ms->max_n_mmaps = mp_.max_n_mmaps;
  ms->mmapped_mem = mp_.mmapped_mem;
  ms->max_mmapped_mem = mp_.max_mmapped_mem;
  ms->using_malloc_checking = using_malloc_checking;
  (void)mutex_unlock(&main_arena.mutex);
  return (Void_t*)ms;
}

int
public_sET_STATe(Void_t* msptr)
{
  struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
  size_t i;
  mbinptr b;

  disallow_malloc_check = 1;
  ptmalloc_init();
  if(ms->magic != MALLOC_STATE_MAGIC) return -1;
  /* Must fail if the major version is too high. */
  if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
  (void)mutex_lock(&main_arena.mutex);
  /* There are no fastchunks. */
  clear_fastchunks(&main_arena);
  set_max_fast(&main_arena, DEFAULT_MXFAST);
  for (i=0; i<NFASTBINS; ++i)
    main_arena.fastbins[i] = 0;
  for (i=0; i<BINMAPSIZE; ++i)
    main_arena.binmap[i] = 0;
  top(&main_arena) = ms->av[2];
  main_arena.last_remainder = 0;
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(ms->av[2*i+2] == 0) {
      assert(ms->av[2*i+3] == 0);
      first(b) = last(b) = b;
    } else {
      if(i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
                          largebin_index(chunksize(ms->av[2*i+3]))==i)) {
        first(b) = ms->av[2*i+2];
        last(b) = ms->av[2*i+3];
        /* Make sure the links to the bins within the heap are correct. */
        first(b)->bk = b;
        last(b)->fd = b;
        /* Set bit in binblocks. */
        mark_bin(&main_arena, i);
      } else {
        /* Oops, index computation from chunksize must have changed.
           Link the whole list into unsorted_chunks. */
        first(b) = last(b) = b;
        b = unsorted_chunks(&main_arena);
        ms->av[2*i+2]->bk = b;
        ms->av[2*i+3]->fd = b->fd;
        b->fd->bk = ms->av[2*i+3];
        b->fd = ms->av[2*i+2];
      }
    }
  }
  mp_.sbrk_base = ms->sbrk_base;
  main_arena.system_mem = ms->sbrked_mem_bytes;
  mp_.trim_threshold = ms->trim_threshold;
  mp_.top_pad = ms->top_pad;
  mp_.n_mmaps_max = ms->n_mmaps_max;
  mp_.mmap_threshold = ms->mmap_threshold;
  check_action = ms->check_action;
  main_arena.max_system_mem = ms->max_sbrked_mem;
#ifdef NO_THREADS
  mp_.max_total_mem = ms->max_total_mem;
#endif
  mp_.n_mmaps = ms->n_mmaps;
  mp_.max_n_mmaps = ms->max_n_mmaps;
  mp_.mmapped_mem = ms->mmapped_mem;
  mp_.max_mmapped_mem = ms->max_mmapped_mem;
  /* add version-dependent code here */
  if (ms->version >= 1) {
    /* Check whether it is safe to enable malloc checking, or whether
       it is necessary to disable it. */
    if (ms->using_malloc_checking && !using_malloc_checking &&
        !disallow_malloc_check)
      __malloc_check_init ();
    else if (!ms->using_malloc_checking && using_malloc_checking) {
      /* Disable malloc checking. */
      __malloc_hook = 0;
      __free_hook = 0;
      __realloc_hook = 0;
      __memalign_hook = 0;
      using_malloc_checking = 0;
    }
  }
  check_malloc_state(&main_arena);

  (void)mutex_unlock(&main_arena.mutex);