1 /* Malloc implementation for multiple threads without lock contention.
2 Copyright (C) 1996, 1997 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Wolfram Gloger <wmglo@dent.med.uni-muenchen.de>
5 and Doug Lea <dl@cs.oswego.edu>, 1996.
7 The GNU C Library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Library General Public License as
9 published by the Free Software Foundation; either version 2 of the
10 License, or (at your option) any later version.
12 The GNU C Library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Library General Public License for more details.
17 You should have received a copy of the GNU Library General Public
18 License along with the GNU C Library; see the file COPYING.LIB. If not,
19 write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
22 /* V2.6.4-pt3 Thu Feb 20 1997
24 This work is mainly derived from malloc-2.6.4 by Doug Lea
25 <dl@cs.oswego.edu>, which is available from:
27 ftp://g.oswego.edu/pub/misc/malloc.c
29 Most of the original comments are reproduced in the code below.
31 * Why use this malloc?
33 This is not the fastest, most space-conserving, most portable, or
34 most tunable malloc ever written. However it is among the fastest
35 while also being among the most space-conserving, portable and tunable.
36 Consistent balance across these factors results in a good general-purpose
37 allocator. For a high-level description, see
38 http://g.oswego.edu/dl/html/malloc.html
40 On many systems, the standard malloc implementation is by itself not
41 thread-safe, and therefore wrapped with a single global lock around
42 all malloc-related functions. In some applications, especially with
43 multiple available processors, this can lead to contention problems
and bad performance. This malloc version was designed with the goal
of avoiding lock waits as much as possible. Statistics indicate
46 that this goal is achieved in many cases.
48 * Synopsis of public routines
50 (Much fuller descriptions are contained in the program documentation below.)
  ptmalloc_init();
     Initialize global configuration. When compiled for multiple threads,
     this function must be called once before any other function in the
     package. It is not required otherwise. It is called automatically
     in the Linux/GNU C library or when compiling with MALLOC_HOOKS.
  malloc(size_t n);
     Return a pointer to a newly allocated chunk of at least n bytes, or null
59 if no space is available.
  free(Void_t* p);
     Release the chunk of memory pointed to by p, or no effect if p is null.
62 realloc(Void_t* p, size_t n);
63 Return a pointer to a chunk of size n that contains the same data
64 as does chunk p up to the minimum of (n, p's size) bytes, or null
65 if no space is available. The returned pointer may or may not be
66 the same as p. If p is null, equivalent to malloc. Unless the
67 #define REALLOC_ZERO_BYTES_FREES below is set, realloc with a
68 size argument of zero (re)allocates a minimum-sized chunk.
69 memalign(size_t alignment, size_t n);
70 Return a pointer to a newly allocated chunk of n bytes, aligned
     in accord with the alignment argument, which must be a power of
     two.
  valloc(size_t n);
     Equivalent to memalign(pagesize, n), where pagesize is the page
75 size of the system (or as near to this as can be figured out from
76 all the includes/defines below.)
  pvalloc(size_t n);
     Equivalent to valloc(minimum-page-that-holds(n)), that is,
79 round up n to nearest pagesize.
80 calloc(size_t unit, size_t quantity);
     Returns a pointer to quantity * unit bytes, with all locations
     set to zero.
  cfree(Void_t* p);
     Equivalent to free(p).
85 malloc_trim(size_t pad);
86 Release all but pad bytes of freed top-most memory back
87 to the system. Return 1 if successful, else 0.
88 malloc_usable_size(Void_t* p);
     Report the number of usable allocated bytes associated with allocated
90 chunk p. This may or may not report more bytes than were requested,
91 due to alignment and minimum size constraints.
  malloc_stats();
     Prints brief summary statistics on stderr.
  mallinfo()
     Returns (by copy) a struct containing various summary statistics.
96 mallopt(int parameter_number, int parameter_value)
97 Changes one of the tunable parameters described below. Returns
98 1 if successful in changing the parameter, else 0.
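
  For example, a minimal usage sketch of the routines above (the
  parameter names such as M_MMAP_MAX come from the mallopt section
  further below; exact behavior varies by configuration):

      Void_t* p = malloc(100);       at least 100 usable bytes, or null
      p = realloc(p, 200);           may move; old contents preserved
      free(p);                       release the chunk again
      mallopt(M_MMAP_MAX, 64);       returns 1 on success, 0 on failure
      malloc_stats();                brief summary statistics on stderr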
* Vital statistics:

  Alignment:                            8-byte
       8-byte alignment is currently hardwired into the design. This
104 seems to suffice for all current machines and C compilers.
106 Assumed pointer representation: 4 or 8 bytes
107 Code for 8-byte pointers is untested by me but has worked
108 reliably by Wolfram Gloger, who contributed most of the
109 changes supporting this.
111 Assumed size_t representation: 4 or 8 bytes
112 Note that size_t is allowed to be 4 bytes even if pointers are 8.
114 Minimum overhead per allocated chunk: 4 or 8 bytes
115 Each malloced chunk has a hidden overhead of 4 bytes holding size
116 and status information.
118 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
                          8-byte ptrs:  24/32 bytes (including 4/8 overhead)
121 When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
122 ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
123 needed; 4 (8) for a trailing size field
124 and 8 (16) bytes for free list pointers. Thus, the minimum
125 allocatable size is 16/24/32 bytes.
127 Even a request for zero bytes (i.e., malloc(0)) returns a
128 pointer to something of the minimum allocatable size.
130 Maximum allocated size: 4-byte size_t: 2^31 - 8 bytes
131 8-byte size_t: 2^63 - 16 bytes
133 It is assumed that (possibly signed) size_t bit values suffice to
134 represent chunk sizes. `Possibly signed' is due to the fact
135 that `size_t' may be defined on a system as either a signed or
136 an unsigned type. To be conservative, values that would appear
137 as negative numbers are avoided.
       Requests for sizes with a negative sign bit will return a
       minimum-sized chunk.
141 Maximum overhead wastage per allocated chunk: normally 15 bytes
143 Alignment demands, plus the minimum allocatable size restriction
144 make the normal worst-case wastage 15 bytes (i.e., up to 15
       more bytes will be allocated than were requested in malloc), with
       two exceptions:
147 1. Because requests for zero bytes allocate non-zero space,
148 the worst case wastage for a request of zero bytes is 24 bytes.
149 2. For requests >= mmap_threshold that are serviced via
150 mmap(), the worst case wastage is 8 bytes plus the remainder
151 from a system page (the minimal mmap unit); typically 4096 bytes.
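
  As a concrete illustration (a sketch; exact numbers depend on the
  platform), with 4-byte size_t and pointers:

      p = malloc(1);                 yields the 16-byte minimum chunk
      malloc_usable_size(p);         reports 12: 16 bytes minus the
                                     4-byte size/status overhead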
155 Here are some features that are NOT currently supported
157 * No automated mechanism for fully checking that all accesses
158 to malloced memory stay within their bounds.
159 * No support for compaction.
161 * Synopsis of compile-time options:
163 People have reported using previous versions of this malloc on all
164 versions of Unix, sometimes by tweaking some of the defines
165 below. It has been tested most extensively on Solaris and
166 Linux. People have also reported adapting this malloc for use in
167 stand-alone embedded systems.
169 The implementation is in straight, hand-tuned ANSI C. Among other
170 consequences, it uses a lot of macros. Because of this, to be at
171 all usable, this code should be compiled using an optimizing compiler
  (for example gcc -O2) that can simplify expressions and control
  flow.
175 __STD_C (default: derived from C compiler defines)
176 Nonzero if using ANSI-standard C compiler, a C++ compiler, or
177 a C compiler sufficiently close to ANSI to get away with it.
178 MALLOC_DEBUG (default: NOT defined)
179 Define to enable debugging. Adds fairly extensive assertion-based
     checking to help track down memory errors, but noticeably slows down
     execution.
182 MALLOC_HOOKS (default: NOT defined)
     Define to enable support for run-time replacement of the allocation
184 functions through user-defined `hooks'.
185 REALLOC_ZERO_BYTES_FREES (default: NOT defined)
186 Define this if you think that realloc(p, 0) should be equivalent
187 to free(p). Otherwise, since malloc returns a unique pointer for
188 malloc(0), so does realloc(p, 0).
189 HAVE_MEMCPY (default: defined)
190 Define if you are not otherwise using ANSI STD C, but still
191 have memcpy and memset in your C library and want to use them.
192 Otherwise, simple internal versions are supplied.
193 USE_MEMCPY (default: 1 if HAVE_MEMCPY is defined, 0 otherwise)
194 Define as 1 if you want the C library versions of memset and
195 memcpy called in realloc and calloc (otherwise macro versions are used).
196 At least on some platforms, the simple macro versions usually
197 outperform libc versions.
198 HAVE_MMAP (default: defined as 1)
199 Define to non-zero to optionally make malloc() use mmap() to
200 allocate very large blocks.
201 HAVE_MREMAP (default: defined as 0 unless Linux libc set)
202 Define to non-zero to optionally make realloc() use mremap() to
203 reallocate very large blocks.
204 malloc_getpagesize (default: derived from system #includes)
205 Either a constant or routine call returning the system page size.
206 HAVE_USR_INCLUDE_MALLOC_H (default: NOT defined)
207 Optionally define if you are on a system with a /usr/include/malloc.h
     that declares struct mallinfo. It is not at all necessary to
     define this even if you do have such a file, but doing so will
     ensure consistency.
210 INTERNAL_SIZE_T (default: size_t)
211 Define to a 32-bit type (probably `unsigned int') if you are on a
212 64-bit machine, yet do not want or need to allow malloc requests of
     greater than 2^31 to be handled. This saves space, especially for
     very large chunks.
215 _LIBC (default: NOT defined)
216 Defined only when compiled as part of the Linux libc/glibc.
217 Also note that there is some odd internal name-mangling via defines
218 (for example, internally, `malloc' is named `mALLOc') needed
     when compiling in this case. These look funny but don't otherwise
     matter.
221 LACKS_UNISTD_H (default: undefined)
222 Define this if your system does not have a <unistd.h>.
223 MORECORE (default: sbrk)
224 The name of the routine to call to obtain more memory from the system.
225 MORECORE_FAILURE (default: -1)
226 The value returned upon failure of MORECORE.
  MORECORE_CLEARS           (default: 1)
     True (1) if the routine mapped to MORECORE zeroes out memory (which
     holds for sbrk).
  DEFAULT_TRIM_THRESHOLD
  DEFAULT_TOP_PAD
  DEFAULT_MMAP_THRESHOLD
  DEFAULT_MMAP_MAX
     Default values of tunable parameters (described in detail below)
     controlling interaction with host system routines (sbrk, mmap, etc).
     These values may also be changed dynamically via mallopt(). The
     preset defaults are those that give best performance for typical
     programs/systems.
  DEFAULT_CHECK_ACTION
     When the standard debugging hooks are in place, and a pointer is
     detected as corrupt, do nothing (0), print an error message (1),
     or call abort() (2).
249 * Compile-time options for multiple threads:
251 USE_PTHREADS, USE_THR, USE_SPROC
252 Define one of these as 1 to select the thread interface:
253 POSIX threads, Solaris threads or SGI sproc's, respectively.
254 If none of these is defined as non-zero, you get a `normal'
255 malloc implementation which is not thread-safe. Support for
256 multiple threads requires HAVE_MMAP=1. As an exception, when
257 compiling for GNU libc, i.e. when _LIBC is defined, then none of
258 the USE_... symbols have to be defined.
  HEAP_MIN_SIZE, HEAP_MAX_SIZE
     When thread support is enabled, additional `heap's are created
263 with mmap calls. These are limited in size; HEAP_MIN_SIZE should
264 be a multiple of the page size, while HEAP_MAX_SIZE must be a power
265 of two for alignment reasons. HEAP_MAX_SIZE should be at least
266 twice as large as the mmap threshold.
  THREAD_STATS
     When this is defined as non-zero, some statistics on mutex locking
     are collected.
279 #if defined (__STDC__)
286 #endif /*__cplusplus*/
299 # include <stddef.h> /* for size_t */
300 # if defined(_LIBC) || defined(MALLOC_HOOKS)
301 # include <stdlib.h> /* for getenv(), abort() */
304 # include <sys/types.h>
307 /* Macros for handling mutexes and thread-specific data. This is
308 included early, because some thread-related header files (such as
309 pthread.h) should be included before any others. */
310 #include "thread-m.h"
316 #include <stdio.h> /* needed for malloc_stats */
327 Because freed chunks may be overwritten with link fields, this
328 malloc will often die when freed memory is overwritten by user
329 programs. This can be very effective (albeit in an annoying way)
330 in helping track down dangling pointers.
332 If you compile with -DMALLOC_DEBUG, a number of assertion checks are
333 enabled that will catch more memory errors. You probably won't be
334 able to make much sense of the actual assertion errors, but they
335 should help you locate incorrectly overwritten memory. The
336 checking is fairly extensive, and will slow down execution
337 noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set will
338 attempt to check every non-mmapped allocated and free chunk in the
339 course of computing the summaries. (By nature, mmapped regions
340 cannot be checked very much automatically.)
342 Setting MALLOC_DEBUG may also be helpful if you are trying to modify
343 this code. The assertions in the check routines spell out in more
344 detail the assumptions and invariants underlying the algorithms.
351 #define assert(x) ((void)0)
356 INTERNAL_SIZE_T is the word-size used for internal bookkeeping
357 of chunk sizes. On a 64-bit machine, you can reduce malloc
358 overhead by defining INTERNAL_SIZE_T to be a 32 bit `unsigned int'
359 at the expense of not being able to handle requests greater than
360 2^31. This limitation is hardly ever a concern; you are encouraged
361 to set this. However, the default version is the same as size_t.
364 #ifndef INTERNAL_SIZE_T
365 #define INTERNAL_SIZE_T size_t
369 REALLOC_ZERO_BYTES_FREES should be set if a call to
370 realloc with zero bytes should be the same as a call to free.
371 Some people think it should. Otherwise, since this malloc
372 returns a unique pointer for malloc(0), so does realloc(p, 0).
376 /* #define REALLOC_ZERO_BYTES_FREES */
380 HAVE_MEMCPY should be defined if you are not otherwise using
381 ANSI STD C, but still have memcpy and memset in your C library
382 and want to use them in calloc and realloc. Otherwise simple
383 macro versions are defined here.
385 USE_MEMCPY should be defined as 1 if you actually want to
386 have memset and memcpy called. People report that the macro
387 versions are often enough faster than libc versions on many
388 systems that it is better to use them.
392 #define HAVE_MEMCPY 1
402 #if (__STD_C || defined(HAVE_MEMCPY))
405 void* memset(void*, int, size_t);
406 void* memcpy(void*, const void*, size_t);
415 /* The following macros are only invoked with (2n+1)-multiples of
416 INTERNAL_SIZE_T units, with a positive integer n. This is exploited
417 for fast inline execution when n is small. */
#define MALLOC_ZERO(charp, nbytes)                                            \
do {                                                                          \
  INTERNAL_SIZE_T mzsz = (nbytes);                                            \
  if(mzsz <= 9*sizeof(mzsz)) {                                                \
    INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp);                         \
    if(mzsz >= 5*sizeof(mzsz)) { *mz++ = 0;                                   \
                                 *mz++ = 0;                                   \
      if(mzsz >= 7*sizeof(mzsz)) { *mz++ = 0;                                 \
                                   *mz++ = 0;                                 \
        if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0;                               \
                                     *mz++ = 0; }}}                           \
    *mz++ = 0;                                                                \
    *mz++ = 0;                                                                \
    *mz   = 0;                                                                \
  } else memset((charp), 0, mzsz);                                            \
} while(0)
#define MALLOC_COPY(dest,src,nbytes)                                          \
do {                                                                          \
  INTERNAL_SIZE_T mcsz = (nbytes);                                            \
  if(mcsz <= 9*sizeof(mcsz)) {                                                \
    INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src);                        \
    INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest);                       \
    if(mcsz >= 5*sizeof(mcsz)) { *mcdst++ = *mcsrc++;                         \
                                 *mcdst++ = *mcsrc++;                         \
      if(mcsz >= 7*sizeof(mcsz)) { *mcdst++ = *mcsrc++;                       \
                                   *mcdst++ = *mcsrc++;                       \
        if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++; }}}                 \
    *mcdst++ = *mcsrc++;                                                      \
    *mcdst++ = *mcsrc++;                                                      \
    *mcdst   = *mcsrc  ;                                                      \
  } else memcpy(dest, src, mcsz);                                             \
} while(0)
454 #else /* !USE_MEMCPY */
456 /* Use Duff's device for good zeroing/copying performance. */
#define MALLOC_ZERO(charp, nbytes)                                            \
do {                                                                          \
  INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                           \
  long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
  switch (mctmp) {                                                            \
    case 0: for(;;) { *mzp++ = 0;                                             \
    case 7:           *mzp++ = 0;                                             \
    case 6:           *mzp++ = 0;                                             \
    case 5:           *mzp++ = 0;                                             \
    case 4:           *mzp++ = 0;                                             \
    case 3:           *mzp++ = 0;                                             \
    case 2:           *mzp++ = 0;                                             \
    case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }                \
  }                                                                           \
} while(0)
#define MALLOC_COPY(dest,src,nbytes)                                          \
do {                                                                          \
  INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                            \
  INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                           \
  long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
  switch (mctmp) {                                                            \
    case 0: for(;;) { *mcdst++ = *mcsrc++;                                    \
    case 7:           *mcdst++ = *mcsrc++;                                    \
    case 6:           *mcdst++ = *mcsrc++;                                    \
    case 5:           *mcdst++ = *mcsrc++;                                    \
    case 4:           *mcdst++ = *mcsrc++;                                    \
    case 3:           *mcdst++ = *mcsrc++;                                    \
    case 2:           *mcdst++ = *mcsrc++;                                    \
    case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; }       \
  }                                                                           \
} while(0)
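
/* A small self-test sketch for the macros above (illustration only, not
   compiled in); nbytes honors the word-multiple contract stated before
   the USE_MEMCPY versions: */
#if 0
static void malloc_zero_selftest()
{
  INTERNAL_SIZE_T buf[10];
  int i;
  for(i = 0; i < 10; i++) buf[i] = (INTERNAL_SIZE_T)-1;
  MALLOC_ZERO((char*)buf, 9*sizeof(INTERNAL_SIZE_T)); /* 9 = 2n+1 words */
  for(i = 0; i < 9; i++) assert(buf[i] == 0);
  assert(buf[9] == (INTERNAL_SIZE_T)-1); /* word past nbytes untouched */
}
#endif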
497 Define HAVE_MMAP to optionally make malloc() use mmap() to
498 allocate very large blocks. These will be returned to the
499 operating system immediately after a free().
507 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
508 large blocks. This is currently only possible on Linux with
509 kernel versions newer than 1.3.77.
513 #define HAVE_MREMAP defined(__linux__)
520 #include <sys/mman.h>
522 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
523 #define MAP_ANONYMOUS MAP_ANON
526 #endif /* HAVE_MMAP */
529 Access to system page size. To the extent possible, this malloc
530 manages memory from the system in page-size units.
532 The following mechanics for getpagesize were adapted from
533 bsd/gnu getpagesize.h
#ifndef LACKS_UNISTD_H
#  include <unistd.h>
#endif

#ifndef malloc_getpagesize
#  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
#    ifndef _SC_PAGE_SIZE
#      define _SC_PAGE_SIZE _SC_PAGESIZE
#    endif
#  endif
#  ifdef _SC_PAGE_SIZE
#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
#  else
#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
       extern size_t getpagesize();
#      define malloc_getpagesize getpagesize()
#    else
#      include <sys/param.h>
#      ifdef EXEC_PAGESIZE
#        define malloc_getpagesize EXEC_PAGESIZE
#      else
#        ifdef NBPG
#          ifndef CLSIZE
#            define malloc_getpagesize NBPG
#          else
#            define malloc_getpagesize (NBPG * CLSIZE)
#          endif
#        else
#          ifdef NBPC
#            define malloc_getpagesize NBPC
#          else
#            ifdef PAGESIZE
#              define malloc_getpagesize PAGESIZE
#            else
#              define malloc_getpagesize (4096) /* just guess */
#            endif
#          endif
#        endif
#      endif
#    endif
#  endif
#endif
583 This version of malloc supports the standard SVID/XPG mallinfo
584 routine that returns a struct containing the same kind of
585 information you can get from malloc_stats. It should work on
586 any SVID/XPG compliant system that has a /usr/include/malloc.h
587 defining struct mallinfo. (If you'd like to install such a thing
588 yourself, cut out the preliminary declarations as described above
589 and below and save them in a malloc.h file. But there's no
590 compelling reason to bother to do this.)
592 The main declaration needed is the mallinfo struct that is returned
  (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
  bunch of fields, most of which are not even meaningful in this
  version of malloc.  Some of these fields are instead filled by
596 mallinfo() with other numbers that might possibly be of interest.
598 HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
599 /usr/include/malloc.h file that includes a declaration of struct
600 mallinfo. If so, it is included; else an SVID2/XPG2 compliant
  version is declared below.  These must be precisely the same for
  mallinfo() to work.
*/
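
/* For reference, a sketch of the SVID2/XPG2-style declaration this file
   expects (illustration only -- the real declaration comes from
   ptmalloc.h or /usr/include/malloc.h below); the comments describe how
   this malloc fills the fields: */
#if 0
struct mallinfo {
  int arena;    /* total space allocated from system */
  int ordblks;  /* number of non-inuse chunks */
  int smblks;   /* unused -- always zero */
  int hblks;    /* number of mmapped regions */
  int hblkhd;   /* total space in mmapped regions */
  int usmblks;  /* unused -- always zero */
  int fsmblks;  /* unused -- always zero */
  int uordblks; /* total allocated space */
  int fordblks; /* total non-inuse space */
  int keepcost; /* top-most, releasable (via malloc_trim) space */
};
#endif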
606 /* #define HAVE_USR_INCLUDE_MALLOC_H */
608 #if HAVE_USR_INCLUDE_MALLOC_H
609 # include "/usr/include/malloc.h"
614 # include "ptmalloc.h"
620 #ifndef DEFAULT_TRIM_THRESHOLD
621 #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
625 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
626 to keep before releasing via malloc_trim in free().
628 Automatic trimming is mainly useful in long-lived programs.
629 Because trimming via sbrk can be slow on some systems, and can
630 sometimes be wasteful (in cases where programs immediately
631 afterward allocate more large chunks) the value should be high
      enough so that your overall system performance would improve by
      releasing this much memory.
635 The trim threshold and the mmap control parameters (see below)
636 can be traded off with one another. Trimming and mmapping are
637 two different ways of releasing unused memory back to the
638 system. Between these two, it is often possible to keep
639 system-level demands of a long-lived program down to a bare
640 minimum. For example, in one test suite of sessions measuring
641 the XF86 X server on Linux, using a trim threshold of 128K and a
      mmap threshold of 192K led to near-minimal long term resource
      consumption.
645 If you are using this malloc in a long-lived program, it should
646 pay to experiment with these values. As a rough guide, you
647 might set to a value close to the average size of a process
648 (program) running on your system. Releasing this much memory
649 would allow such a process to run in memory. Generally, it's
650 worth it to tune for trimming rather than memory mapping when a
651 program undergoes phases where several large chunks are
652 allocated and released in ways that can reuse each other's
653 storage, perhaps mixed with phases where there are no such
654 chunks at all. And in well-behaved long-lived programs,
655 controlling release of large blocks via trimming versus mapping
658 However, in most programs, these parameters serve mainly as
659 protection against the system-level effects of carrying around
660 massive amounts of unneeded memory. Since frequent calls to
661 sbrk, mmap, and munmap otherwise degrade performance, the default
      parameters are set to relatively high values that serve only as
      safeguards.
665 The default trim value is high enough to cause trimming only in
666 fairly extreme (by current memory consumption standards) cases.
667 It must be greater than page size to have any useful effect. To
668 disable trimming completely, you can set to (unsigned long)(-1);
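
      For example (a sketch), a long-lived server that should never give
      memory back via sbrk could execute:

          mallopt(M_TRIM_THRESHOLD, -1);    -1 wraps to (unsigned long)(-1)
*/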
674 #ifndef DEFAULT_TOP_PAD
675 #define DEFAULT_TOP_PAD (0)
679 M_TOP_PAD is the amount of extra `padding' space to allocate or
680 retain whenever sbrk is called. It is used in two ways internally:
682 * When sbrk is called to extend the top of the arena to satisfy
683 a new malloc request, this much padding is added to the sbrk
686 * When malloc_trim is called automatically from free(),
687 it is used as the `pad' argument.
689 In both cases, the actual amount of padding is rounded
690 so that the end of the arena is always a system page boundary.
692 The main reason for using padding is to avoid calling sbrk so
693 often. Having even a small pad greatly reduces the likelihood
694 that nearly every malloc request during program start-up (or
      after trimming) will invoke sbrk, which needlessly wastes
      time.
698 Automatic rounding-up to page-size units is normally sufficient
699 to avoid measurable overhead, so the default is 0. However, in
700 systems where sbrk is relatively slow, it can pay to increase
      this value, at the expense of carrying around more memory than
      is needed.
707 #ifndef DEFAULT_MMAP_THRESHOLD
708 #define DEFAULT_MMAP_THRESHOLD (128 * 1024)
713 M_MMAP_THRESHOLD is the request size threshold for using mmap()
714 to service a request. Requests of at least this size that cannot
715 be allocated using already-existing space will be serviced via mmap.
716 (If enough normal freed space already exists it is used instead.)
718 Using mmap segregates relatively large chunks of memory so that
719 they can be individually obtained and released from the host
720 system. A request serviced through mmap is never reused by any
721 other request (at least not directly; the system may just so
722 happen to remap successive requests to the same locations).
724 Segregating space in this way has the benefit that mmapped space
725 can ALWAYS be individually released back to the system, which
726 helps keep the system level memory demands of a long-lived
727 program low. Mapped memory can never become `locked' between
728 other chunks, as can happen with normally allocated chunks, which
      means that even trimming via malloc_trim would not release them.
731 However, it has the disadvantages that:
733 1. The space cannot be reclaimed, consolidated, and then
734 used to service later requests, as happens with normal chunks.
      2. It can lead to more wastage because of mmap page alignment
         requirements.
737 3. It causes malloc performance to be more dependent on host
738 system memory management support routines which may vary in
739 implementation quality and may impose arbitrary
740 limitations. Generally, servicing a request via normal
741 malloc steps is faster than going through a system's mmap.
743 All together, these considerations should lead you to use mmap
744 only for relatively large requests.
#ifndef DEFAULT_MMAP_MAX
#if HAVE_MMAP
#define DEFAULT_MMAP_MAX       (1024)
#else
#define DEFAULT_MMAP_MAX       (0)
#endif
#endif
760 M_MMAP_MAX is the maximum number of requests to simultaneously
761 service using mmap. This parameter exists because:
      1. Some systems have a limited number of internal tables for
         use by mmap.
      2. In most systems, overreliance on mmap can degrade overall
         performance.
767 3. If a program allocates many large regions, it is probably
768 better off using normal sbrk-based allocation routines that
769 can reclaim and reallocate normal heap memory. Using a
770 small value allows transition into this mode after the
771 first few allocations.
773 Setting to 0 disables all use of mmap. If HAVE_MMAP is not set,
774 the default value is 0, and attempts to set it to non-zero values
775 in mallopt will fail.
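
      For example (a sketch):

          mallopt(M_MMAP_MAX, 0);      never use mmap to service requests
          mallopt(M_MMAP_MAX, 64);     allow at most 64 simultaneous maps
*/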
780 #ifndef DEFAULT_CHECK_ACTION
781 #define DEFAULT_CHECK_ACTION 1
784 /* What to do if the standard debugging hooks are in place and a
785 corrupt pointer is detected: do nothing (0), print an error message
786 (1), or call abort() (2). */
790 #define HEAP_MIN_SIZE (32*1024)
791 #define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
793 /* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
794 that are dynamically created for multi-threaded programs. The
795 maximum size must be a power of two, for fast determination of
796 which heap belongs to a chunk. It should be much larger than
797 the mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.
*/
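
/* Because each heap is mmap()ed at an address aligned to the power-of-two
   HEAP_MAX_SIZE, the owning heap descriptor of any chunk can be found by
   masking the chunk's address.  A sketch of that lookup (illustration
   only; the arena-management code uses a macro of this shape):

      heap_info *h = (heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1));
*/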
804 #define THREAD_STATS 0
/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   collected. */
813 Special defines for the Linux/GNU C library.
822 Void_t * __default_morecore (ptrdiff_t);
823 Void_t *(*__morecore)(ptrdiff_t) = __default_morecore;
827 Void_t * __default_morecore ();
828 Void_t *(*__morecore)() = __default_morecore;
832 #define MORECORE (*__morecore)
833 #define MORECORE_FAILURE 0
834 #define MORECORE_CLEARS 1
836 #define munmap __munmap
837 #define mremap __mremap
838 #define mprotect __mprotect
839 #undef malloc_getpagesize
840 #define malloc_getpagesize __getpagesize()
845 extern Void_t* sbrk(ptrdiff_t);
847 extern Void_t* sbrk();
851 #define MORECORE sbrk
854 #ifndef MORECORE_FAILURE
855 #define MORECORE_FAILURE -1
858 #ifndef MORECORE_CLEARS
859 #define MORECORE_CLEARS 1
866 #define cALLOc __libc_calloc
867 #define fREe __libc_free
868 #define mALLOc __libc_malloc
869 #define mEMALIGn __libc_memalign
870 #define rEALLOc __libc_realloc
871 #define vALLOc __libc_valloc
872 #define pvALLOc __libc_pvalloc
873 #define mALLINFo __libc_mallinfo
874 #define mALLOPt __libc_mallopt
875 #define mALLOC_STATs __malloc_stats
876 #define mALLOC_USABLE_SIZe __malloc_usable_size
877 #define mALLOC_TRIm __malloc_trim
878 #define mALLOC_GET_STATe __malloc_get_state
879 #define mALLOC_SET_STATe __malloc_set_state
883 #define cALLOc calloc
885 #define mALLOc malloc
886 #define mEMALIGn memalign
887 #define rEALLOc realloc
888 #define vALLOc valloc
889 #define pvALLOc pvalloc
890 #define mALLINFo mallinfo
891 #define mALLOPt mallopt
892 #define mALLOC_STATs malloc_stats
893 #define mALLOC_USABLE_SIZe malloc_usable_size
894 #define mALLOC_TRIm malloc_trim
895 #define mALLOC_GET_STATe malloc_get_state
896 #define mALLOC_SET_STATe malloc_set_state
900 /* Public routines */
905 void ptmalloc_init(void);
907 Void_t* mALLOc(size_t);
909 Void_t* rEALLOc(Void_t*, size_t);
910 Void_t* mEMALIGn(size_t, size_t);
911 Void_t* vALLOc(size_t);
912 Void_t* pvALLOc(size_t);
913 Void_t* cALLOc(size_t, size_t);
915 int mALLOC_TRIm(size_t);
916 size_t mALLOC_USABLE_SIZe(Void_t*);
917 void mALLOC_STATs(void);
918 int mALLOPt(int, int);
919 struct mallinfo mALLINFo(void);
920 Void_t* mALLOC_GET_STATe(void);
921 int mALLOC_SET_STATe(Void_t*);
926 void ptmalloc_init();
937 size_t mALLOC_USABLE_SIZe();
940 struct mallinfo mALLINFo();
941 Void_t* mALLOC_GET_STATe();
942 int mALLOC_SET_STATe();
948 }; /* end of extern "C" */
951 #if !defined(NO_THREADS) && !HAVE_MMAP
952 "Can't have threads support without mmap"
struct malloc_chunk
{
  INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
  INTERNAL_SIZE_T size;      /* Size in bytes, including overhead. */
  struct malloc_chunk* fd;   /* double links -- used only if free. */
  struct malloc_chunk* bk;
};
969 typedef struct malloc_chunk* mchunkptr;
973 malloc_chunk details:
975 (The following includes lightly edited explanations by Colin Plumb.)
977 Chunks of memory are maintained using a `boundary tag' method as
978 described in e.g., Knuth or Standish. (See the paper by Paul
979 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
980 survey of such techniques.) Sizes of free chunks are stored both
981 in the front of each chunk and at the end. This makes
982 consolidating fragmented chunks into bigger chunks very fast. The
    size fields also hold bits representing whether chunks are free or
    in use.
986 An allocated chunk looks like this:
989 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
990 | Size of previous chunk, if allocated | |
991 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
992 | Size of chunk, in bytes |P|
993 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
994 | User data starts here... .
996 . (malloc_usable_space() bytes) .
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk                                     |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1003 Where "chunk" is the front of the chunk for the purpose of most of
1004 the malloc code, but "mem" is the pointer that is returned to the
1005 user. "Nextchunk" is the beginning of the next contiguous chunk.
1007 Chunks always begin on even word boundaries, so the mem portion
1008 (which is returned to the user) is also on an even word boundary, and
1009 thus double-word aligned.
1011 Free chunks are stored in circular doubly-linked lists, and look like this:
1013 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1014 | Size of previous chunk |
1015 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1016 `head:' | Size of chunk, in bytes |P|
1017 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1018 | Forward pointer to next chunk in list |
1019 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1020 | Back pointer to previous chunk in list |
1021 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1022 | Unused space (may be 0 bytes long) .
1025 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1026 `foot:' | Size of chunk, in bytes |
1027 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1029 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1030 chunk size (which is always a multiple of two words), is an in-use
1031 bit for the *previous* chunk. If that bit is *clear*, then the
1032 word before the current chunk size contains the previous chunk
1033 size, and can be used to find the front of the previous chunk.
1034 (The very first chunk allocated always has this bit set,
1035 preventing access to non-existent (or non-owned) memory.)
1037 Note that the `foot' of the current chunk is actually represented
1038 as the prev_size of the NEXT chunk. (This makes it easier to
1039 deal with alignments etc).
1041 The two exceptions to all this are
1043 1. The special chunk `top', which doesn't bother using the
1044 trailing size field since there is no
1045 next contiguous chunk that would have to index off it. (After
1046 initialization, `top' is forced to always exist. If it would
	become less than MINSIZE bytes long, it is replenished via
	malloc_extend_top.)
1050 2. Chunks allocated via mmap, which have the second-lowest-order
1051 bit (IS_MMAPPED) set in their size fields. Because they are
1052 never merged or traversed from any other chunk, they have no
1053 foot size or inuse information.
1055 Available chunks are kept in any of several places (all declared below):
1057 * `av': An array of chunks serving as bin headers for consolidated
1058 chunks. Each bin is doubly linked. The bins are approximately
1059 proportionally (log) spaced. There are a lot of these bins
1060 (128). This may look excessive, but works very well in
1061 practice. All procedures maintain the invariant that no
1062 consolidated chunk physically borders another one. Chunks in
1063 bins are kept in size order, with ties going to the
1064 approximately least recently used chunk.
1066 The chunks in each bin are maintained in decreasing sorted order by
1067 size. This is irrelevant for the small bins, which all contain
1068 the same-sized chunks, but facilitates best-fit allocation for
1069 larger chunks. (These lists are just sequential. Keeping them in
1070 order almost never requires enough traversal to warrant using
1071 fancier ordered data structures.) Chunks of the same size are
1072 linked with the most recently freed at the front, and allocations
1073 are taken from the back. This results in LRU or FIFO allocation
1074 order, which tends to give each chunk an equal opportunity to be
1075 consolidated with adjacent freed chunks, resulting in larger free
1076 chunks and less fragmentation.
1078 * `top': The top-most available chunk (i.e., the one bordering the
1079 end of available memory) is treated specially. It is never
1080 included in any bin, is used only if no other chunk is
1081 available, and is released back to the system if it is very
1082 large (see M_TRIM_THRESHOLD).
1084 * `last_remainder': A bin holding only the remainder of the
1085 most recently split (non-top) chunk. This bin is checked
1086 before other non-fitting chunks, so as to provide better
1087 locality for runs of sequentially allocated chunks.
1089 * Implicitly, through the host system's memory mapping tables.
1090 If supported, requests greater than a threshold are usually
1091 serviced via calls to mmap, and then later released via munmap.
1098 The bins are an array of pairs of pointers serving as the
1099 heads of (initially empty) doubly-linked lists of chunks, laid out
1100 in a way so that each pair can be treated as if it were in a
1101 malloc_chunk. (This way, the fd/bk offsets for linking bin heads
1102 and chunks are the same).
1104 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1105 8 bytes apart. Larger bins are approximately logarithmically
1106 spaced. (See the table below.)
    64 bins of size       8
    32 bins of size      64
    16 bins of size     512
     8 bins of size    4096
     4 bins of size   32768
     2 bins of size  262144
     1 bin  of size what's left
1118 There is actually a little bit of slop in the numbers in bin_index
1119 for the sake of speed. This makes no difference elsewhere.
1121 The special chunks `top' and `last_remainder' get their own bins,
1122 (this is implemented via yet more trickery with the av array),
1123 although `top' is never properly linked to its bin since it is
1124 always handled specially.
1128 #define NAV 128 /* number of bins */
1130 typedef struct malloc_chunk* mbinptr;
1132 /* An arena is a configuration of malloc_chunks together with an array
1133 of bins. With multiple threads, it must be locked via a mutex
1134 before changing its data structures. One or more `heaps' are
1135 associated with each arena, except for the main_arena, which is
1136 associated only with the `main heap', i.e. the conventional free
1137 store obtained with calls to MORECORE() (usually sbrk). The `av'
1138 array is never mentioned directly in the code, but instead used via
1139 bin access macros. */
typedef struct _arena {
  mbinptr av[2*NAV + 2];
  struct _arena *next;
  size_t size;
#if THREAD_STATS
  long stat_lock_direct, stat_lock_loop, stat_lock_wait;
#endif
  mutex_t mutex;
} arena;
1152 /* A heap is a single contiguous memory region holding (coalesceable)
1153 malloc_chunks. It is allocated with mmap() and always starts at an
1154 address aligned to HEAP_MAX_SIZE. Not used unless compiling for
1155 multiple threads. */
typedef struct _heap_info {
  arena *ar_ptr;           /* Arena for this heap. */
  struct _heap_info *prev; /* Previous heap. */
  size_t size;             /* Current size in bytes. */
  size_t pad;              /* Make sure the following data is properly
                              aligned. */
} heap_info;
1166 Static functions (forward declarations)
1171 static void chunk_free(arena *ar_ptr, mchunkptr p);
1172 static mchunkptr chunk_alloc(arena *ar_ptr, INTERNAL_SIZE_T size);
1173 static mchunkptr chunk_realloc(arena *ar_ptr, mchunkptr oldp,
1174 INTERNAL_SIZE_T oldsize, INTERNAL_SIZE_T nb);
static mchunkptr chunk_align(arena *ar_ptr, INTERNAL_SIZE_T nb,
                             size_t alignment);
1177 static int main_trim(size_t pad);
1179 static int heap_trim(heap_info *heap, size_t pad);
1182 static Void_t* malloc_check(size_t sz, const Void_t *caller);
1183 static void free_check(Void_t* mem, const Void_t *caller);
1184 static Void_t* realloc_check(Void_t* oldmem, size_t bytes,
1185 const Void_t *caller);
1186 static Void_t* memalign_check(size_t alignment, size_t bytes,
1187 const Void_t *caller);
1188 static Void_t* malloc_starter(size_t sz, const Void_t *caller);
1189 static void free_starter(Void_t* mem, const Void_t *caller);
1190 static Void_t* malloc_atfork(size_t sz, const Void_t *caller);
1191 static void free_atfork(Void_t* mem, const Void_t *caller);
1194 static Void_t* malloc_check(size_t sz);
1195 static void free_check(Void_t* mem);
1196 static Void_t* realloc_check(Void_t* oldmem, size_t bytes);
1197 static Void_t* memalign_check(size_t alignment, size_t bytes);
1198 static Void_t* malloc_starter(size_t sz);
1199 static void free_starter(Void_t* mem);
1200 static Void_t* malloc_atfork(size_t sz);
1201 static void free_atfork(Void_t* mem);
1207 static void chunk_free();
1208 static mchunkptr chunk_alloc();
1209 static mchunkptr chunk_realloc();
1210 static mchunkptr chunk_align();
1211 static int main_trim();
1213 static int heap_trim();
1215 #if defined(_LIBC) || defined(MALLOC_HOOKS)
1216 static Void_t* malloc_check();
1217 static void free_check();
1218 static Void_t* realloc_check();
1219 static Void_t* memalign_check();
1220 static Void_t* malloc_starter();
1221 static void free_starter();
1222 static Void_t* malloc_atfork();
1223 static void free_atfork();
1230 /* sizes, alignments */
1232 #define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
1233 #define MALLOC_ALIGNMENT (SIZE_SZ + SIZE_SZ)
1234 #define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
1235 #define MINSIZE (sizeof(struct malloc_chunk))
1237 /* conversion from malloc headers to user pointers, and back */
1239 #define chunk2mem(p) ((Void_t*)((char*)(p) + 2*SIZE_SZ))
1240 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
1242 /* pad request bytes into a usable size */
1244 #define request2size(req) \
1245 (((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \
1246 (long)(MINSIZE + MALLOC_ALIGN_MASK)) ? MINSIZE : \
1247 (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK)))
1249 /* Check if m has acceptable alignment */
1251 #define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
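
/* Worked examples of the conversions above, assuming 4-byte pointers and
   INTERNAL_SIZE_T (SIZE_SZ == 4, MALLOC_ALIGNMENT == 8, MINSIZE == 16);
   illustration only, not compiled in: */
#if 0
static void request2size_examples()
{
  assert(request2size(20) == 24);      /* (20 + 4 + 7) & ~7 */
  assert(request2size(0) == MINSIZE);  /* tiny requests get MINSIZE */
}
#endif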
1257 Physical chunk operations
1261 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1263 #define PREV_INUSE 0x1
1265 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1267 #define IS_MMAPPED 0x2
1269 /* Bits to mask off when extracting size */
1271 #define SIZE_BITS (PREV_INUSE|IS_MMAPPED)
1274 /* Ptr to next physical malloc_chunk. */
1276 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))
1278 /* Ptr to previous physical malloc_chunk */
1280 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
1283 /* Treat space at ptr + offset as a chunk */
1285 #define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
1291 Dealing with use bits
1294 /* extract p's inuse bit */
#define inuse(p)\
((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
1299 /* extract inuse bit of previous chunk */
1301 #define prev_inuse(p) ((p)->size & PREV_INUSE)
1303 /* check for mmap()'ed chunk */
1305 #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
1307 /* set/clear chunk as in use without otherwise disturbing */
1309 #define set_inuse(p) \
1310 ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE
1312 #define clear_inuse(p) \
1313 ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)
1315 /* check/set/clear inuse bits in known places */
1317 #define inuse_bit_at_offset(p, s)\
1318 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
1320 #define set_inuse_bit_at_offset(p, s)\
1321 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
1323 #define clear_inuse_bit_at_offset(p, s)\
1324 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
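
/* To make the boundary-tag arithmetic concrete: a sketch (not compiled
   in) of how both physical neighbors of a chunk are located in constant
   time during coalescing, using the macros defined above. */
#if 0
static void neighbor_sketch(mchunkptr p)
{
  mchunkptr next = next_chunk(p);    /* (char*)p + (p->size & ~PREV_INUSE) */
  if(!prev_inuse(p)) {               /* P bit clear: preceding chunk free */
    mchunkptr prev = prev_chunk(p);  /* (char*)p - p->prev_size */
    assert(next_chunk(prev) == p);   /* its foot is our prev_size */
  }
  assert((char*)next > (char*)p);    /* chunk sizes are always positive */
}
#endif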
1330 Dealing with size fields
1333 /* Get size, ignoring use bits */
1335 #define chunksize(p) ((p)->size & ~(SIZE_BITS))
1337 /* Set size at head, without disturbing its use bit */
1339 #define set_head_size(p, s) ((p)->size = (((p)->size & PREV_INUSE) | (s)))
1341 /* Set size/use ignoring previous bits in header */
1343 #define set_head(p, s) ((p)->size = (s))
1345 /* Set size at footer (only when chunk is not in use) */
1347 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
1355 #define bin_at(a, i) ((mbinptr)((char*)&(((a)->av)[2*(i) + 2]) - 2*SIZE_SZ))
1356 #define init_bin(a, i) ((a)->av[2*i+2] = (a)->av[2*i+3] = bin_at((a), i))
1357 #define next_bin(b) ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr)))
1358 #define prev_bin(b) ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr)))
1361 The first 2 bins are never indexed. The corresponding av cells are instead
1362 used for bookkeeping. This is not to save space, but to simplify
1363 indexing, maintain locality, and avoid some initialization tests.
1366 #define binblocks(a) (bin_at(a,0)->size)/* bitvector of nonempty blocks */
1367 #define top(a) (bin_at(a,0)->fd) /* The topmost chunk */
1368 #define last_remainder(a) (bin_at(a,1)) /* remainder from last split */
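
/* The bin_at() offset trick above means each av[] pair behaves like the
   fd/bk fields of a phantom chunk header, so an empty bin simply points
   at itself.  A sketch (not compiled in): */
#if 0
static int bin_is_empty(arena *a, int i)
{
  mbinptr b = bin_at(a, i);
  return b->fd == b;   /* no real chunks are linked into this bin */
}
#endif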
1371 Because top initially points to its own bin with initial
1372 zero size, thus forcing extension on the first malloc request,
1373 we avoid having any special code in malloc to check whether
   it even exists yet. But we still need such a check in malloc_extend_top.
1377 #define initial_top(a) ((mchunkptr)bin_at(a, 0))
1381 /* field-extraction macros */
1383 #define first(b) ((b)->fd)
1384 #define last(b) ((b)->bk)
1390 #define bin_index(sz) \
1391 (((((unsigned long)(sz)) >> 9) == 0) ? (((unsigned long)(sz)) >> 3): \
1392 ((((unsigned long)(sz)) >> 9) <= 4) ? 56 + (((unsigned long)(sz)) >> 6): \
1393 ((((unsigned long)(sz)) >> 9) <= 20) ? 91 + (((unsigned long)(sz)) >> 9): \
1394 ((((unsigned long)(sz)) >> 9) <= 84) ? 110 + (((unsigned long)(sz)) >> 12): \
1395 ((((unsigned long)(sz)) >> 9) <= 340) ? 119 + (((unsigned long)(sz)) >> 15): \
 ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \
                                          126)
1399 bins for chunks < 512 are all spaced 8 bytes apart, and hold
1400 identically sized chunks. This is exploited in malloc.
1403 #define MAX_SMALLBIN 63
1404 #define MAX_SMALLBIN_SIZE 512
1405 #define SMALLBIN_WIDTH 8
1407 #define smallbin_index(sz) (((unsigned long)(sz)) >> 3)
1410 Requests are `small' if both the corresponding and the next bin are small
1413 #define is_small_request(nb) ((nb) < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)
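
/* Worked examples of the index computations above (not compiled in): */
#if 0
static void bin_index_examples()
{
  assert(smallbin_index(40) == 5);                   /* 40 >> 3 */
  assert(bin_index(40) == 5);                        /* small range */
  assert(bin_index(1500) == 56 + (1500 >> 6));       /* == 79 */
  assert(bin_index(100000) == 119 + (100000 >> 15)); /* == 122 */
}
#endif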
1418 To help compensate for the large number of bins, a one-level index
1419 structure is used for bin-by-bin searching. `binblocks' is a
1420 one-word bitvector recording whether groups of BINBLOCKWIDTH bins
1421 have any (possibly) non-empty bins, so they can be skipped over
   all at once during traversals. The bits are NOT always
1423 cleared as soon as all bins in a block are empty, but instead only
1424 when all are noticed to be empty during traversal in malloc.
1427 #define BINBLOCKWIDTH 4 /* bins per block */
1429 /* bin<->block macros */
1431 #define idx2binblock(ix) ((unsigned)1 << ((ix) / BINBLOCKWIDTH))
1432 #define mark_binblock(a, ii) (binblocks(a) |= idx2binblock(ii))
1433 #define clear_binblock(a, ii) (binblocks(a) &= ~(idx2binblock(ii)))
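
/* Example of the arithmetic above: bin index 37 lives in block 37/4 == 9,
   so marking it sets bit (1 << 9) of the binblocks bitvector.  A sketch
   (not compiled in): */
#if 0
static void binblock_sketch(arena *a)
{
  mark_binblock(a, 37);
  assert(binblocks(a) & ((unsigned)1 << 9));
  clear_binblock(a, 37);  /* the real code clears bits only lazily */
}
#endif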
1438 /* Static bookkeeping data */
1440 /* Helper macro to initialize bins */
1441 #define IAV(i) bin_at(&main_arena, i), bin_at(&main_arena, i)
static arena main_arena = {
    {
 0, 0,
1446 IAV(0), IAV(1), IAV(2), IAV(3), IAV(4), IAV(5), IAV(6), IAV(7),
1447 IAV(8), IAV(9), IAV(10), IAV(11), IAV(12), IAV(13), IAV(14), IAV(15),
1448 IAV(16), IAV(17), IAV(18), IAV(19), IAV(20), IAV(21), IAV(22), IAV(23),
1449 IAV(24), IAV(25), IAV(26), IAV(27), IAV(28), IAV(29), IAV(30), IAV(31),
1450 IAV(32), IAV(33), IAV(34), IAV(35), IAV(36), IAV(37), IAV(38), IAV(39),
1451 IAV(40), IAV(41), IAV(42), IAV(43), IAV(44), IAV(45), IAV(46), IAV(47),
1452 IAV(48), IAV(49), IAV(50), IAV(51), IAV(52), IAV(53), IAV(54), IAV(55),
1453 IAV(56), IAV(57), IAV(58), IAV(59), IAV(60), IAV(61), IAV(62), IAV(63),
1454 IAV(64), IAV(65), IAV(66), IAV(67), IAV(68), IAV(69), IAV(70), IAV(71),
1455 IAV(72), IAV(73), IAV(74), IAV(75), IAV(76), IAV(77), IAV(78), IAV(79),
1456 IAV(80), IAV(81), IAV(82), IAV(83), IAV(84), IAV(85), IAV(86), IAV(87),
1457 IAV(88), IAV(89), IAV(90), IAV(91), IAV(92), IAV(93), IAV(94), IAV(95),
1458 IAV(96), IAV(97), IAV(98), IAV(99), IAV(100), IAV(101), IAV(102), IAV(103),
1459 IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111),
1460 IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119),
1461 IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127)
    },
    &main_arena, /* next */
    0, /* size */
#if THREAD_STATS
    0, 0, 0, /* stat_lock_direct, stat_lock_loop, stat_lock_wait */
#endif
    MUTEX_INITIALIZER /* mutex */
};
1473 /* Thread specific data */
1476 static tsd_key_t arena_key;
1477 static mutex_t list_lock = MUTEX_INITIALIZER;
1481 static int stat_n_heaps = 0;
1482 #define THREAD_STAT(x) x
1484 #define THREAD_STAT(x) do ; while(0)
1487 /* variables holding tunable values */
1489 static unsigned long trim_threshold = DEFAULT_TRIM_THRESHOLD;
1490 static unsigned long top_pad = DEFAULT_TOP_PAD;
1491 static unsigned int n_mmaps_max = DEFAULT_MMAP_MAX;
1492 static unsigned long mmap_threshold = DEFAULT_MMAP_THRESHOLD;
1493 static int check_action = DEFAULT_CHECK_ACTION;
1495 /* The first value returned from sbrk */
1496 static char* sbrk_base = (char*)(-1);
1498 /* The maximum memory obtained from system via sbrk */
1499 static unsigned long max_sbrked_mem = 0;
1501 /* The maximum via either sbrk or mmap (too difficult to track with threads) */
1503 static unsigned long max_total_mem = 0;
1506 /* The total memory obtained from system via sbrk */
1507 #define sbrked_mem (main_arena.size)
1509 /* Tracking mmaps */
1511 static unsigned int n_mmaps = 0;
1512 static unsigned int max_n_mmaps = 0;
1513 static unsigned long mmapped_mem = 0;
1514 static unsigned long max_mmapped_mem = 0;
1519 #define weak_variable
1521 /* In GNU libc we want the hook variables to be weak definitions to
1522 avoid a problem with Emacs. */
1523 #define weak_variable weak_function
1526 /* Already initialized? */
1527 int __malloc_initialized = 0;
1530 /* The following two functions are registered via thread_atfork() to
1531 make sure that the mutexes remain in a consistent state in the
1532 fork()ed version of a thread. Also adapt the malloc and free hooks
1533 temporarily, because the `atfork' handler mechanism may use
1534 malloc/free internally (e.g. in LinuxThreads). */
1537 static __malloc_ptr_t (*save_malloc_hook) __MALLOC_P ((size_t __size,
1538 const __malloc_ptr_t));
1539 static void (*save_free_hook) __MALLOC_P ((__malloc_ptr_t __ptr,
1540 const __malloc_ptr_t));
1541 static Void_t* save_arena;
1544 static __malloc_ptr_t (*save_malloc_hook) __MALLOC_P ((size_t __size));
1545 static void (*save_free_hook) __MALLOC_P ((__malloc_ptr_t __ptr));
1546 static Void_t* save_arena;
static void
ptmalloc_lock_all __MALLOC_P((void))
{
  arena *ar_ptr;
1555 (void)mutex_lock(&list_lock);
1556 for(ar_ptr = &main_arena;;) {
1557 (void)mutex_lock(&ar_ptr->mutex);
1558 ar_ptr = ar_ptr->next;
1559 if(ar_ptr == &main_arena) break;
1561 #if defined(_LIBC) || defined(MALLOC_HOOKS)
1562 save_malloc_hook = __malloc_hook;
1563 save_free_hook = __free_hook;
1564 __malloc_hook = malloc_atfork;
1565 __free_hook = free_atfork;
1566 /* Only the current thread may perform malloc/free calls now. */
1567 tsd_getspecific(arena_key, save_arena);
1568 tsd_setspecific(arena_key, (Void_t*)0);
#endif
}

static void
ptmalloc_unlock_all __MALLOC_P((void))
{
  arena *ar_ptr;
1577 #if defined(_LIBC) || defined(MALLOC_HOOKS)
1578 tsd_setspecific(arena_key, save_arena);
1579 __malloc_hook = save_malloc_hook;
1580 __free_hook = save_free_hook;
#endif
  for(ar_ptr = &main_arena;;) {
1583 (void)mutex_unlock(&ar_ptr->mutex);
1584 ar_ptr = ar_ptr->next;
1585 if(ar_ptr == &main_arena) break;
1587 (void)mutex_unlock(&list_lock);
}

/* Initialization routine. */
1593 static void ptmalloc_init __MALLOC_P ((void)) __attribute__ ((constructor));
1597 ptmalloc_init __MALLOC_P((void))
1600 ptmalloc_init __MALLOC_P((void))
#endif
{
#if defined(_LIBC) || defined(MALLOC_HOOKS)
  const char* s;
#endif
1607 if(__malloc_initialized) return;
1608 __malloc_initialized = 1;
1609 #if defined(_LIBC) || defined(MALLOC_HOOKS)
1610 /* With some threads implementations, creating thread-specific data
1611 or initializing a mutex may call malloc() itself. Provide a
1612 simple starter version (realloc() won't work). */
1613 save_malloc_hook = __malloc_hook;
1614 save_free_hook = __free_hook;
1615 __malloc_hook = malloc_starter;
1616 __free_hook = free_starter;
1618 #if defined(_LIBC) && !defined (NO_THREADS)
1619 /* Initialize the pthreads interface. */
1620 if (__pthread_initialize != NULL)
1621 __pthread_initialize();
1624 mutex_init(&main_arena.mutex);
1625 mutex_init(&list_lock);
1626 tsd_key_create(&arena_key, NULL);
1627 tsd_setspecific(arena_key, (Void_t *)&main_arena);
1628 thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all);
1630 #if defined(_LIBC) || defined(MALLOC_HOOKS)
1631 if((s = getenv("MALLOC_TRIM_THRESHOLD_")))
1632 mALLOPt(M_TRIM_THRESHOLD, atoi(s));
1633 if((s = getenv("MALLOC_TOP_PAD_")))
1634 mALLOPt(M_TOP_PAD, atoi(s));
1635 if((s = getenv("MALLOC_MMAP_THRESHOLD_")))
1636 mALLOPt(M_MMAP_THRESHOLD, atoi(s));
1637 if((s = getenv("MALLOC_MMAP_MAX_")))
1638 mALLOPt(M_MMAP_MAX, atoi(s));
1639 s = getenv("MALLOC_CHECK_");
1640 __malloc_hook = save_malloc_hook;
1641 __free_hook = save_free_hook;
  if(s) {
    if(s[0]) mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
    __malloc_check_init();
  }
1646 if(__malloc_initialize_hook != NULL)
1647 (*__malloc_initialize_hook)();
#endif
}

/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
1652 #ifdef thread_atfork_static
1653 thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
1654 ptmalloc_unlock_all)
1657 #if defined(_LIBC) || defined(MALLOC_HOOKS)
1659 /* Hooks for debugging versions. The initial hooks just call the
1660 initialization routine, then do the normal work. */
1664 malloc_hook_ini(size_t sz, const __malloc_ptr_t caller)
1667 malloc_hook_ini(size_t sz)
1669 malloc_hook_ini(sz) size_t sz;
1673 __malloc_hook = NULL;
1674 __realloc_hook = NULL;
  __memalign_hook = NULL;
  ptmalloc_init();
  return mALLOc(sz);
}
1682 realloc_hook_ini(Void_t* ptr, size_t sz, const __malloc_ptr_t caller)
1685 realloc_hook_ini(Void_t* ptr, size_t sz)
1687 realloc_hook_ini(ptr, sz) Void_t* ptr; size_t sz;
1691 __malloc_hook = NULL;
1692 __realloc_hook = NULL;
  __memalign_hook = NULL;
  ptmalloc_init();
  return rEALLOc(ptr, sz);
}
1700 memalign_hook_ini(size_t sz, size_t alignment, const __malloc_ptr_t caller)
1703 memalign_hook_ini(size_t sz, size_t alignment)
1705 memalign_hook_ini(sz, alignment) size_t sz; size_t alignment;
1709 __malloc_hook = NULL;
1710 __realloc_hook = NULL;
  __memalign_hook = NULL;
  ptmalloc_init();
  return mEMALIGn(sz, alignment);
}
1716 void weak_variable (*__malloc_initialize_hook) __MALLOC_P ((void)) = NULL;
1718 void weak_variable (*__free_hook) __MALLOC_P ((__malloc_ptr_t __ptr,
1719 const __malloc_ptr_t)) = NULL;
1720 __malloc_ptr_t weak_variable (*__malloc_hook)
1721 __MALLOC_P ((size_t __size, const __malloc_ptr_t)) = malloc_hook_ini;
1722 __malloc_ptr_t weak_variable (*__realloc_hook)
 __MALLOC_P ((__malloc_ptr_t __ptr, size_t __size, const __malloc_ptr_t))
     = realloc_hook_ini;
1725 __malloc_ptr_t weak_variable (*__memalign_hook)
1726 __MALLOC_P ((size_t __size, size_t __alignment, const __malloc_ptr_t))
1727 = memalign_hook_ini;
1729 void weak_variable (*__free_hook) __MALLOC_P ((__malloc_ptr_t __ptr)) = NULL;
1730 __malloc_ptr_t weak_variable (*__malloc_hook)
1731 __MALLOC_P ((size_t __size)) = malloc_hook_ini;
1732 __malloc_ptr_t weak_variable (*__realloc_hook)
1733 __MALLOC_P ((__malloc_ptr_t __ptr, size_t __size)) = realloc_hook_ini;
1734 __malloc_ptr_t weak_variable (*__memalign_hook)
1735 __MALLOC_P ((size_t __size, size_t __alignment)) = memalign_hook_ini;
1737 void weak_variable (*__after_morecore_hook) __MALLOC_P ((void)) = NULL;
1739 /* Activate a standard set of debugging hooks. */
void
__malloc_check_init()
{
1743 __malloc_hook = malloc_check;
1744 __free_hook = free_check;
1745 __realloc_hook = realloc_check;
1746 __memalign_hook = memalign_check;
1747 if(check_action == 1)
    fprintf(stderr, "malloc: using debugging hooks\n");
}
1757 /* Routines dealing with mmap(). */
1761 #ifndef MAP_ANONYMOUS
1763 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
1765 #define MMAP(size, prot) ((dev_zero_fd < 0) ? \
1766 (dev_zero_fd = open("/dev/zero", O_RDWR), \
1767 mmap(0, (size), (prot), MAP_PRIVATE, dev_zero_fd, 0)) : \
1768 mmap(0, (size), (prot), MAP_PRIVATE, dev_zero_fd, 0))
1772 #define MMAP(size, prot) \
1773 (mmap(0, (size), (prot), MAP_PRIVATE|MAP_ANONYMOUS, -1, 0))
1778 static mchunkptr mmap_chunk(size_t size)
1780 static mchunkptr mmap_chunk(size) size_t size;
{
  size_t page_mask = malloc_getpagesize - 1;
  mchunkptr p;
1786 if(n_mmaps >= n_mmaps_max) return 0; /* too many regions */
1788 /* For mmapped chunks, the overhead is one SIZE_SZ unit larger, because
   * there is no following chunk whose prev_size field could be used.
   */
1791 size = (size + SIZE_SZ + page_mask) & ~page_mask;
1793 p = (mchunkptr)MMAP(size, PROT_READ|PROT_WRITE);
1794 if(p == (mchunkptr) MAP_FAILED) return 0;
  n_mmaps++;
  if (n_mmaps > max_n_mmaps) max_n_mmaps = n_mmaps;
1799 /* We demand that eight bytes into a page must be 8-byte aligned. */
1800 assert(aligned_OK(chunk2mem(p)));
1802 /* The offset to the start of the mmapped region is stored
1803 * in the prev_size field of the chunk; normally it is zero,
   * but that can be changed in memalign().
   */
  p->prev_size = 0;
1807 set_head(p, size|IS_MMAPPED);
1809 mmapped_mem += size;
1810 if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
1811 max_mmapped_mem = mmapped_mem;
1813 if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;

  return p;
}
1820 static void munmap_chunk(mchunkptr p)
1822 static void munmap_chunk(p) mchunkptr p;
{
  INTERNAL_SIZE_T size = chunksize(p);
  int ret;
1828 assert (chunk_is_mmapped(p));
1829 assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
1830 assert((n_mmaps > 0));
1831 assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0);
  n_mmaps--;
  mmapped_mem -= (size + p->prev_size);
1836 ret = munmap((char *)p - p->prev_size, size + p->prev_size);
  /* munmap returns non-zero on failure */
  assert(ret == 0);
}
1845 static mchunkptr mremap_chunk(mchunkptr p, size_t new_size)
1847 static mchunkptr mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
{
  size_t page_mask = malloc_getpagesize - 1;
1851 INTERNAL_SIZE_T offset = p->prev_size;
  INTERNAL_SIZE_T size = chunksize(p);
  char *cp;
1855 assert (chunk_is_mmapped(p));
1856 assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
1857 assert((n_mmaps > 0));
1858 assert(((size + offset) & (malloc_getpagesize-1)) == 0);
1860 /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
1861 new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
  cp = (char *)mremap((char *)p - offset, size + offset, new_size,
                      MREMAP_MAYMOVE);
1866 if (cp == (char *)-1) return 0;
1868 p = (mchunkptr)(cp + offset);
1870 assert(aligned_OK(chunk2mem(p)));
1872 assert((p->prev_size == offset));
1873 set_head(p, (new_size - offset)|IS_MMAPPED);
1875 mmapped_mem -= size + offset;
1876 mmapped_mem += new_size;
1877 if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
1878 max_mmapped_mem = mmapped_mem;
1880 if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
1881     max_total_mem = mmapped_mem + sbrked_mem;
1883   return p;
1886 #endif /* HAVE_MREMAP */
1888 #endif /* HAVE_MMAP */
1892 /* Managing heaps and arenas (for concurrent threads) */
1896 /* Create a new heap. size is automatically rounded up to a multiple
1897 of the page size. */
1901 new_heap(size_t size)
1903 new_heap(size) size_t size;
1906 size_t page_mask = malloc_getpagesize - 1;
1911   if(size+top_pad < HEAP_MIN_SIZE)
1912     size = HEAP_MIN_SIZE;
1913   else if(size+top_pad <= HEAP_MAX_SIZE)
1914     size += top_pad;
1915   else if(size > HEAP_MAX_SIZE)
1916     return 0;
1917   else
1918     size = HEAP_MAX_SIZE;
1919   size = (size + page_mask) & ~page_mask;
1921 p1 = (char *)MMAP(HEAP_MAX_SIZE<<1, PROT_NONE);
1922   if(p1 == MAP_FAILED)
1923     return 0;
1924   p2 = (char *)(((unsigned long)p1 + HEAP_MAX_SIZE) & ~(HEAP_MAX_SIZE-1));
1925   ul = p2 - p1;
1926   munmap(p1, ul);
1927   munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
1928   if(mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
1929     munmap(p2, HEAP_MAX_SIZE);
1930     return 0;
1931   }
1932   h = (heap_info *)p2;
1933   h->size = size;
1934   THREAD_STAT(stat_n_heaps++);
1935   return h;
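/* A minimal sketch of the alignment trick used above: reserve twice the
   desired size with PROT_NONE, keep the aligned half, and unmap the
   slack on both sides.  Constants and error handling are simplified
   relative to new_heap() itself; reserve_aligned is a hypothetical
   helper name. */
#if 0
#include <sys/mman.h>

static void *
reserve_aligned(unsigned long size)   /* size must be a power of two */
{
  char *p1, *p2;
  unsigned long ul;

  p1 = (char *) mmap(0, size << 1, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p1 == MAP_FAILED)
    return 0;
  p2 = (char *) (((unsigned long) p1 + size) & ~(size - 1));
  ul = p2 - p1;                       /* leading slack */
  munmap(p1, ul);
  munmap(p2 + size, size - ul);       /* trailing slack */
  return p2;                          /* aligned to `size' */
}
#endif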
1938 /* Grow or shrink a heap. size is automatically rounded up to a
1939 multiple of the page size if it is positive. */
1943 grow_heap(heap_info *h, long diff)
1945 grow_heap(h, diff) heap_info *h; long diff;
1948 size_t page_mask = malloc_getpagesize - 1;
1951   if(diff >= 0) {
1952     diff = (diff + page_mask) & ~page_mask;
1953     new_size = (long)h->size + diff;
1954     if(new_size > HEAP_MAX_SIZE)
1955       return -1;
1956     if(mprotect((char *)h + h->size, diff, PROT_READ|PROT_WRITE) != 0)
1957       return -2;
1958   } else {
1959     new_size = (long)h->size + diff;
1960     if(new_size < (long)sizeof(*h))
1961       return -1;
1962     if(mprotect((char *)h + new_size, -diff, PROT_NONE) != 0)
1963       return -2;
1964   }
1965   h->size = new_size;
1966   return 0;
1969 /* Delete a heap. */
1971 #define delete_heap(heap) munmap((char*)(heap), HEAP_MAX_SIZE)
1973 /* arena_get() acquires an arena and locks the corresponding mutex.
1974 First, try the one last locked successfully by this thread. (This
1975 is the common case and handled with a macro for speed.) Then, loop
1976 once over the circularly linked list of arenas. If no arena is
1977 readily available, create a new one. */
1979 #define arena_get(ptr, size) do { \
1980 Void_t *vptr = NULL; \
1981 ptr = (arena *)tsd_getspecific(arena_key, vptr); \
1982 if(ptr && !mutex_trylock(&ptr->mutex)) { \
1983     THREAD_STAT(++(ptr->stat_lock_direct)); \
1984   } else \
1985     ptr = arena_get2(ptr, (size)); \
1986 } while(0)
1990 arena_get2(arena *a_tsd, size_t size)
1992 arena_get2(a_tsd, size) arena *a_tsd; size_t size;
1994   arena *a;
1995   heap_info *h;
1996   char *ptr;
1997   int i;
1999   unsigned long misalign;
2002 a = a_tsd = &main_arena;
2006 /* This can only happen while initializing the new arena. */
2007 (void)mutex_lock(&main_arena.mutex);
2008 THREAD_STAT(++(main_arena.stat_lock_wait));
2013 /* Check the global, circularly linked list for available arenas. */
2015 if(!mutex_trylock(&a->mutex)) {
2016 THREAD_STAT(++(a->stat_lock_loop));
2017 tsd_setspecific(arena_key, (Void_t *)a);
2021 } while(a != a_tsd);
2023 /* Nothing immediately available, so generate a new arena. */
2024 h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT));
2027 a = h->ar_ptr = (arena *)(h+1);
2028 for(i=0; i<NAV; i++)
2032 tsd_setspecific(arena_key, (Void_t *)a);
2033 mutex_init(&a->mutex);
2034 i = mutex_lock(&a->mutex); /* remember result */
2036 /* Set up the top chunk, with proper alignment. */
2037 ptr = (char *)(a + 1);
2038 misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
2040 ptr += MALLOC_ALIGNMENT - misalign;
2041 top(a) = (mchunkptr)ptr;
2042 set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);
2044 /* Add the new arena to the list. */
2045 (void)mutex_lock(&list_lock);
2046 a->next = main_arena.next;
2047 main_arena.next = a;
2048 (void)mutex_unlock(&list_lock);
2050   if(i) /* locking failed; keep arena for further attempts later */
2051     return 0;
2053   THREAD_STAT(++(a->stat_lock_loop));
2054   return a;
2057 /* find the heap and corresponding arena for a given ptr */
2059 #define heap_for_ptr(ptr) \
2060 ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
2061 #define arena_for_ptr(ptr) \
2062 (((mchunkptr)(ptr) < top(&main_arena) && (char *)(ptr) >= sbrk_base) ? \
2063 &main_arena : heap_for_ptr(ptr)->ar_ptr)
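/* Illustration of heap_for_ptr(): because every non-main heap is aligned
   to HEAP_MAX_SIZE, clearing the low address bits of any chunk pointer
   inside a heap yields its heap_info header.  The numbers below are made
   up and assume HEAP_MAX_SIZE == 0x100000 (1Mb) purely for the example:
     ptr               == 0x40235678
     heap_for_ptr(ptr) == 0x40200000   (low 20 bits cleared)
     owning arena      == heap_for_ptr(ptr)->ar_ptr                    */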
2065 #else /* defined(NO_THREADS) */
2067 /* Without concurrent threads, there is only one arena. */
2069 #define arena_get(ptr, sz) (ptr = &main_arena)
2070 #define arena_for_ptr(ptr) (&main_arena)
2072 #endif /* !defined(NO_THREADS) */
2084 These routines make a number of assertions about the states
2085 of data structures that should be true at all times. If any
2086 are not true, it's very likely that a user program has somehow
2087 trashed memory. (It's also possible that there is a coding error
2088 in malloc. In which case, please report it!)
2092 static void do_check_chunk(arena *ar_ptr, mchunkptr p)
2094 static void do_check_chunk(ar_ptr, p) arena *ar_ptr; mchunkptr p;
2097 INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
2099 /* No checkable chunk is mmapped */
2100 assert(!chunk_is_mmapped(p));
2103 if(ar_ptr != &main_arena) {
2104 heap_info *heap = heap_for_ptr(p);
2105 assert(heap->ar_ptr == ar_ptr);
2106 assert((char *)p + sz <= (char *)heap + heap->size);
2111 /* Check for legal address ... */
2112 assert((char*)p >= sbrk_base);
2113 if (p != top(ar_ptr))
2114 assert((char*)p + sz <= (char*)top(ar_ptr));
2116 assert((char*)p + sz <= sbrk_base + sbrked_mem);
2122 static void do_check_free_chunk(arena *ar_ptr, mchunkptr p)
2124 static void do_check_free_chunk(ar_ptr, p) arena *ar_ptr; mchunkptr p;
2127 INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
2128 mchunkptr next = chunk_at_offset(p, sz);
2130 do_check_chunk(ar_ptr, p);
2132 /* Check whether it claims to be free ... */
2135 /* Must have OK size and fields */
2136 assert((long)sz >= (long)MINSIZE);
2137 assert((sz & MALLOC_ALIGN_MASK) == 0);
2138 assert(aligned_OK(chunk2mem(p)));
2139 /* ... matching footer field */
2140 assert(next->prev_size == sz);
2141 /* ... and is fully consolidated */
2142 assert(prev_inuse(p));
2143 assert (next == top(ar_ptr) || inuse(next));
2145 /* ... and has minimally sane links */
2146 assert(p->fd->bk == p);
2147 assert(p->bk->fd == p);
2151 static void do_check_inuse_chunk(arena *ar_ptr, mchunkptr p)
2153 static void do_check_inuse_chunk(ar_ptr, p) arena *ar_ptr; mchunkptr p;
2156 mchunkptr next = next_chunk(p);
2157 do_check_chunk(ar_ptr, p);
2159 /* Check whether it claims to be in use ... */
2162 /* ... whether its size is OK (it might be a fencepost) ... */
2163 assert(chunksize(p) >= MINSIZE || next->size == (0|PREV_INUSE));
2165 /* ... and is surrounded by OK chunks.
2166 Since more things can be checked with free chunks than inuse ones,
2167 if an inuse chunk borders them and debug is on, it's worth doing them.
2171 mchunkptr prv = prev_chunk(p);
2172 assert(next_chunk(prv) == p);
2173 do_check_free_chunk(ar_ptr, prv);
2175 if (next == top(ar_ptr))
2177 assert(prev_inuse(next));
2178 assert(chunksize(next) >= MINSIZE);
2180 else if (!inuse(next))
2181 do_check_free_chunk(ar_ptr, next);
2186 static void do_check_malloced_chunk(arena *ar_ptr,
2187 mchunkptr p, INTERNAL_SIZE_T s)
2189 static void do_check_malloced_chunk(ar_ptr, p, s)
2190 arena *ar_ptr; mchunkptr p; INTERNAL_SIZE_T s;
2193   INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
2194   long room = sz - s;
2196 do_check_inuse_chunk(ar_ptr, p);
2198 /* Legal size ... */
2199 assert((long)sz >= (long)MINSIZE);
2200 assert((sz & MALLOC_ALIGN_MASK) == 0);
2201   assert(room >= 0);
2202   assert(room < (long)MINSIZE);
2204 /* ... and alignment */
2205 assert(aligned_OK(chunk2mem(p)));
2208 /* ... and was allocated at front of an available chunk */
2209 assert(prev_inuse(p));
2214 #define check_free_chunk(A,P) do_check_free_chunk(A,P)
2215 #define check_inuse_chunk(A,P) do_check_inuse_chunk(A,P)
2216 #define check_chunk(A,P) do_check_chunk(A,P)
2217 #define check_malloced_chunk(A,P,N) do_check_malloced_chunk(A,P,N)
2219 #define check_free_chunk(A,P)
2220 #define check_inuse_chunk(A,P)
2221 #define check_chunk(A,P)
2222 #define check_malloced_chunk(A,P,N)
2228 Macro-based internal utilities
2233 Linking chunks in bin lists.
2234 Call these only with variables, not arbitrary expressions, as arguments.
2238 Place chunk p of size s in its bin, in size order,
2239 putting it ahead of others of same size.
2243 #define frontlink(A, P, S, IDX, BK, FD) \
2245 if (S < MAX_SMALLBIN_SIZE) \
2247 IDX = smallbin_index(S); \
2248 mark_binblock(A, IDX); \
2249 BK = bin_at(A, IDX); \
2253 FD->bk = BK->fd = P; \
2257 IDX = bin_index(S); \
2258 BK = bin_at(A, IDX); \
2260 if (FD == BK) mark_binblock(A, IDX); \
2263 while (FD != BK && S < chunksize(FD)) FD = FD->fd; \
2268 FD->bk = BK->fd = P; \
2273 /* take a chunk off a list */
2275 #define unlink(P, BK, FD) \
2276 { \
2277   BK = P->bk; \
2278   FD = P->fd; \
2279   FD->bk = BK; \
2280   BK->fd = FD; \
2281 } \
2283 /* Place p as the last remainder */
2285 #define link_last_remainder(A, P) \
2287 last_remainder(A)->fd = last_remainder(A)->bk = P; \
2288 P->fd = P->bk = last_remainder(A); \
2291 /* Clear the last_remainder bin */
2293 #define clear_last_remainder(A) \
2294 (last_remainder(A)->fd = last_remainder(A)->bk = last_remainder(A))
2301 Extend the top-most chunk by obtaining memory from system.
2302 Main interface to sbrk (but see also malloc_trim).
2306 static void malloc_extend_top(arena *ar_ptr, INTERNAL_SIZE_T nb)
2308 static void malloc_extend_top(ar_ptr, nb) arena *ar_ptr; INTERNAL_SIZE_T nb;
2311 unsigned long pagesz = malloc_getpagesize;
2312 mchunkptr old_top = top(ar_ptr); /* Record state of old top */
2313 INTERNAL_SIZE_T old_top_size = chunksize(old_top);
2314 INTERNAL_SIZE_T top_size; /* new size of top chunk */
2317 if(ar_ptr == &main_arena) {
2320 char* brk; /* return value from sbrk */
2321 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */
2322 INTERNAL_SIZE_T correction; /* bytes for 2nd sbrk call */
2323 char* new_brk; /* return of 2nd sbrk call */
2324 char* old_end = (char*)(chunk_at_offset(old_top, old_top_size));
2326 /* Pad request with top_pad plus minimal overhead */
2327 INTERNAL_SIZE_T sbrk_size = nb + top_pad + MINSIZE;
2329 /* If not the first time through, round to preserve page boundary */
2330 /* Otherwise, we need to correct to a page size below anyway. */
2331 /* (We also correct below if an intervening foreign sbrk call.) */
2333 if (sbrk_base != (char*)(-1))
2334 sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1);
2336 brk = (char*)(MORECORE (sbrk_size));
2338 /* Fail if sbrk failed or if a foreign sbrk call killed our space */
2339 if (brk == (char*)(MORECORE_FAILURE) ||
2340 (brk < old_end && old_top != initial_top(&main_arena)))
2343 #if defined(_LIBC) || defined(MALLOC_HOOKS)
2344 /* Call the `morecore' hook if necessary. */
2345 if (__after_morecore_hook)
2346 (*__after_morecore_hook) ();
2349 sbrked_mem += sbrk_size;
2351 if (brk == old_end) { /* can just add bytes to current top */
2352 top_size = sbrk_size + old_top_size;
2353 set_head(old_top, top_size | PREV_INUSE);
2354 old_top = 0; /* don't free below */
2356     if (sbrk_base == (char*)(-1)) /* First time through. Record base */
2357       sbrk_base = brk;
2358     else
2359       /* Someone else called sbrk(). Count those bytes as sbrked_mem. */
2360       sbrked_mem += brk - (char*)old_end;
2362 /* Guarantee alignment of first new chunk made from this space */
2363 front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
2364       if (front_misalign > 0) {
2365         correction = (MALLOC_ALIGNMENT) - front_misalign;
2366         brk += correction;
2367       } else
2368         correction = 0;
2370 /* Guarantee the next brk will be at a page boundary */
2371 correction += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
2373 /* Allocate correction */
2374 new_brk = (char*)(MORECORE (correction));
2375 if (new_brk == (char*)(MORECORE_FAILURE)) return;
2377 #if defined(_LIBC) || defined(MALLOC_HOOKS)
2378 /* Call the `morecore' hook if necessary. */
2379 if (__after_morecore_hook)
2380 (*__after_morecore_hook) ();
2383 sbrked_mem += correction;
2385 top(&main_arena) = (mchunkptr)brk;
2386 top_size = new_brk - brk + correction;
2387 set_head(top(&main_arena), top_size | PREV_INUSE);
2389 if (old_top == initial_top(&main_arena))
2390 old_top = 0; /* don't free below */
2393 if ((unsigned long)sbrked_mem > (unsigned long)max_sbrked_mem)
2394 max_sbrked_mem = sbrked_mem;
2396 if ((unsigned long)(mmapped_mem + sbrked_mem) >
2397 (unsigned long)max_total_mem)
2398 max_total_mem = mmapped_mem + sbrked_mem;
2402 } else { /* ar_ptr != &main_arena */
2403 heap_info *old_heap, *heap;
2404 size_t old_heap_size;
2406     if(old_top_size < MINSIZE) /* this should never happen */
2407       return;
2409 /* First try to extend the current heap. */
2410     if(MINSIZE + nb <= old_top_size)
2411       return;
2412 old_heap = heap_for_ptr(old_top);
2413 old_heap_size = old_heap->size;
2414 if(grow_heap(old_heap, MINSIZE + nb - old_top_size) == 0) {
2415 ar_ptr->size += old_heap->size - old_heap_size;
2416 top_size = ((char *)old_heap + old_heap->size) - (char *)old_top;
2417       set_head(old_top, top_size | PREV_INUSE);
2418       return;
2419     }
2421 /* A new heap must be created. */
2422     heap = new_heap(nb + (MINSIZE + sizeof(*heap)));
2423     if(!heap)
2424       return;
2425 heap->ar_ptr = ar_ptr;
2426 heap->prev = old_heap;
2427 ar_ptr->size += heap->size;
2429 /* Set up the new top, so we can safely use chunk_free() below. */
2430 top(ar_ptr) = chunk_at_offset(heap, sizeof(*heap));
2431 top_size = heap->size - sizeof(*heap);
2432 set_head(top(ar_ptr), top_size | PREV_INUSE);
2434 #endif /* !defined(NO_THREADS) */
2436 /* We always land on a page boundary */
2437 assert(((unsigned long)((char*)top(ar_ptr) + top_size) & (pagesz-1)) == 0);
2439 /* Setup fencepost and free the old top chunk. */
2441 /* The fencepost takes at least MINSIZE bytes, because it might
2442 become the top chunk again later. Note that a footer is set
2443 up, too, although the chunk is marked in use. */
2444 old_top_size -= MINSIZE;
2445 set_head(chunk_at_offset(old_top, old_top_size + 2*SIZE_SZ), 0|PREV_INUSE);
2446 if(old_top_size >= MINSIZE) {
2447 set_head(chunk_at_offset(old_top, old_top_size), (2*SIZE_SZ)|PREV_INUSE);
2448 set_foot(chunk_at_offset(old_top, old_top_size), (2*SIZE_SZ));
2449 set_head_size(old_top, old_top_size);
2450     chunk_free(ar_ptr, old_top);
2451   } else {
2452     set_head(old_top, (old_top_size + 2*SIZE_SZ)|PREV_INUSE);
2453     set_foot(old_top, (old_top_size + 2*SIZE_SZ));
2454   }
2461 /* Main public routines */
2467 The requested size is first converted into a usable form, `nb'.
2468 This currently means to add 4 bytes overhead plus possibly more to
2469 obtain 8-byte alignment and/or to obtain a size of at least
2470 MINSIZE (currently 16, 24, or 32 bytes), the smallest allocatable
2471 size. (All fits are considered `exact' if they are within MINSIZE
2472 bytes.)
2474 From there, the first of the following steps to succeed is taken:
2476 1. The bin corresponding to the request size is scanned, and if
2477 a chunk of exactly the right size is found, it is taken.
2479 2. The most recently remaindered chunk is used if it is big
2480 enough. This is a form of (roving) first fit, used only in
2481 the absence of exact fits. Runs of consecutive requests use
2482 the remainder of the chunk used for the previous such request
2483 whenever possible. This limited use of a first-fit style
2484 allocation strategy tends to give contiguous chunks
2485 coextensive lifetimes, which improves locality and can reduce
2486 fragmentation in the long run.
2488 3. Other bins are scanned in increasing size order, using a
2489 chunk big enough to fulfill the request, and splitting off
2490 any remainder. This search is strictly by best-fit; i.e.,
2491 the smallest (with ties going to approximately the least
2492 recently used) chunk that fits is selected.
2494 4. If large enough, the chunk bordering the end of memory
2495 (`top') is split off. (This use of `top' is in accord with
2496 the best-fit search rule. In effect, `top' is treated as
2497 larger (and thus less well fitting) than any other available
2498 chunk since it can be extended to be as large as necessary
2499 (up to system limitations).
2501 5. If the request size meets the mmap threshold and the
2502 system supports mmap, and there are few enough currently
2503 allocated mmapped regions, and a call to mmap succeeds,
2504 the request is allocated via direct memory mapping.
2506 6. Otherwise, the top of memory is extended by
2507 obtaining more space from the system (normally using sbrk,
2508 but definable to anything else via the MORECORE macro).
2509 Memory is gathered from the system (in system page-sized
2510 units) in a way that allows chunks obtained across different
2511 sbrk calls to be consolidated, but does not require
2512 contiguous memory. Thus, it should be safe to intersperse
2513 mallocs with other sbrk calls.
2516 All allocations are made from the `lowest' part of any found
2517 chunk. (The implementation invariant is that prev_inuse is
2518 always true of any allocated chunk; i.e., that each allocated
2519 chunk borders either a previously allocated and still in-use chunk,
2520 or the base of its memory arena.)
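/* Worked example of the size normalization described above (a sketch,
   assuming SIZE_SZ == 4, MALLOC_ALIGNMENT == 8 and MINSIZE == 16, the
   common 32-bit configuration):
     malloc(20): 20 + 4 overhead = 24, already 8-aligned  -> nb = 24
     malloc(21): 21 + 4 = 25, rounded up to 8-alignment   -> nb = 32
     malloc(1):  1 + 4 = 5, below MINSIZE                 -> nb = 16 */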
2525 Void_t* mALLOc(size_t bytes)
2527 Void_t* mALLOc(bytes) size_t bytes;
2529   arena *ar_ptr;
2531   INTERNAL_SIZE_T nb; /* padded request size */
2532   mchunkptr victim;   /* chunk to return */
2534 #if defined(_LIBC) || defined(MALLOC_HOOKS)
2535 if (__malloc_hook != NULL) {
2539 result = (*__malloc_hook)(bytes, __builtin_return_address (0));
2541 result = (*__malloc_hook)(bytes);
2547 nb = request2size(bytes);
2548   arena_get(ar_ptr, nb);
2549   if(!ar_ptr)
2550     return 0;
2551 victim = chunk_alloc(ar_ptr, nb);
2552   (void)mutex_unlock(&ar_ptr->mutex);
2553   if(!victim) {
2554     /* Maybe the failure is due to running out of mmapped areas. */
2555     if(ar_ptr != &main_arena) {
2556       (void)mutex_lock(&main_arena.mutex);
2557       victim = chunk_alloc(&main_arena, nb);
2558       (void)mutex_unlock(&main_arena.mutex);
2559     }
2560     if(!victim) return 0;
2561   }
2562 return chunk2mem(victim);
2567 chunk_alloc(arena *ar_ptr, INTERNAL_SIZE_T nb)
2569 chunk_alloc(ar_ptr, nb) arena *ar_ptr; INTERNAL_SIZE_T nb;
2572 mchunkptr victim; /* inspected/selected chunk */
2573 INTERNAL_SIZE_T victim_size; /* its size */
2574 int idx; /* index for bin traversal */
2575 mbinptr bin; /* associated bin */
2576 mchunkptr remainder; /* remainder from a split */
2577 long remainder_size; /* its size */
2578 int remainder_index; /* its bin index */
2579 unsigned long block; /* block traverser bit */
2580 int startidx; /* first bin of a traversed block */
2581 mchunkptr fwd; /* misc temp for linking */
2582 mchunkptr bck; /* misc temp for linking */
2583 mbinptr q; /* misc temp */
2586 /* Check for exact match in a bin */
2588 if (is_small_request(nb)) /* Faster version for small requests */
2590 idx = smallbin_index(nb);
2592 /* No traversal or size check necessary for small bins. */
2594     q = bin_at(ar_ptr, idx);
2595     victim = last(q);
2597     /* Also scan the next one, since it would have a remainder < MINSIZE */
2598     if (victim == q)
2599     {
2600       q = next_bin(q);
2601       victim = last(q);
2602     }
2603     if (victim != q)
2604     {
2605 victim_size = chunksize(victim);
2606 unlink(victim, bck, fwd);
2607 set_inuse_bit_at_offset(victim, victim_size);
2608       check_malloced_chunk(ar_ptr, victim, nb);
2609       return victim;
2610     }
2612 idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */
2617 idx = bin_index(nb);
2618 bin = bin_at(ar_ptr, idx);
2620 for (victim = last(bin); victim != bin; victim = victim->bk)
2622 victim_size = chunksize(victim);
2623 remainder_size = victim_size - nb;
2625 if (remainder_size >= (long)MINSIZE) /* too big */
2627       --idx; /* adjust to rescan below after checking last remainder */
2628       break;
2631 else if (remainder_size >= 0) /* exact fit */
2633 unlink(victim, bck, fwd);
2634 set_inuse_bit_at_offset(victim, victim_size);
2635         check_malloced_chunk(ar_ptr, victim, nb);
2636         return victim;
2644 /* Try to use the last split-off remainder */
2646 if ( (victim = last_remainder(ar_ptr)->fd) != last_remainder(ar_ptr))
2648 victim_size = chunksize(victim);
2649 remainder_size = victim_size - nb;
2651 if (remainder_size >= (long)MINSIZE) /* re-split */
2653 remainder = chunk_at_offset(victim, nb);
2654 set_head(victim, nb | PREV_INUSE);
2655 link_last_remainder(ar_ptr, remainder);
2656 set_head(remainder, remainder_size | PREV_INUSE);
2657 set_foot(remainder, remainder_size);
2658       check_malloced_chunk(ar_ptr, victim, nb);
2659       return victim;
2662 clear_last_remainder(ar_ptr);
2664 if (remainder_size >= 0) /* exhaust */
2666 set_inuse_bit_at_offset(victim, victim_size);
2667       check_malloced_chunk(ar_ptr, victim, nb);
2668       return victim;
2671 /* Else place in bin */
2673 frontlink(ar_ptr, victim, victim_size, remainder_index, bck, fwd);
2677 If there are any possibly nonempty big-enough blocks,
2678 search for best fitting chunk by scanning bins in blockwidth units.
2681   if ( (block = idx2binblock(idx)) <= binblocks(ar_ptr))
2682   {
2684 /* Get to the first marked block */
2686     if ( (block & binblocks(ar_ptr)) == 0)
2687     {
2688       /* force to an even block boundary */
2689       idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
2690       block <<= 1;
2691       while ((block & binblocks(ar_ptr)) == 0)
2692       {
2693         idx += BINBLOCKWIDTH;
2694         block <<= 1;
2695       }
2696     }
2698     /* For each possibly nonempty block ... */
2699     for (;;)
2700     {
2701 startidx = idx; /* (track incomplete blocks) */
2702 q = bin = bin_at(ar_ptr, idx);
2704 /* For each bin in this block ... */
2707 /* Find and use first big enough chunk ... */
2709 for (victim = last(bin); victim != bin; victim = victim->bk)
2711 victim_size = chunksize(victim);
2712 remainder_size = victim_size - nb;
2714 if (remainder_size >= (long)MINSIZE) /* split */
2716 remainder = chunk_at_offset(victim, nb);
2717 set_head(victim, nb | PREV_INUSE);
2718 unlink(victim, bck, fwd);
2719 link_last_remainder(ar_ptr, remainder);
2720 set_head(remainder, remainder_size | PREV_INUSE);
2721 set_foot(remainder, remainder_size);
2722             check_malloced_chunk(ar_ptr, victim, nb);
2723             return victim;
2726 else if (remainder_size >= 0) /* take */
2728 set_inuse_bit_at_offset(victim, victim_size);
2729 unlink(victim, bck, fwd);
2730             check_malloced_chunk(ar_ptr, victim, nb);
2731             return victim;
2736 bin = next_bin(bin);
2738 } while ((++idx & (BINBLOCKWIDTH - 1)) != 0);
2740 /* Clear out the block bit. */
2742       do /* Possibly backtrack to try to clear a partial block */
2743       {
2744         if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
2745         {
2746           binblocks(ar_ptr) &= ~block;
2747           break;
2748         }
2749         --startidx;
2750         q = prev_bin(q);
2751       } while (first(q) == q);
2753 /* Get to the next possibly nonempty block */
2755       if ( (block <<= 1) <= binblocks(ar_ptr) && (block != 0) )
2756       {
2757         while ((block & binblocks(ar_ptr)) == 0)
2758         {
2759           idx += BINBLOCKWIDTH;
2760           block <<= 1;
2761         }
2762       }
2763       else
2764         break; /* done */
2769 /* Try to use top chunk */
2771 /* Require that there be a remainder, ensuring top always exists */
2772   if ( (remainder_size = chunksize(top(ar_ptr)) - nb) < (long)MINSIZE)
2773   {
2776 /* If big and would otherwise need to extend, try to use mmap instead */
2777 if ((unsigned long)nb >= (unsigned long)mmap_threshold &&
2778       (victim = mmap_chunk(nb)) != 0)
2779     return victim;
2783 malloc_extend_top(ar_ptr, nb);
2784 if ((remainder_size = chunksize(top(ar_ptr)) - nb) < (long)MINSIZE)
2785       return 0; /* propagate failure */
2786   }
2788 victim = top(ar_ptr);
2789 set_head(victim, nb | PREV_INUSE);
2790 top(ar_ptr) = chunk_at_offset(victim, nb);
2791 set_head(top(ar_ptr), remainder_size | PREV_INUSE);
2792   check_malloced_chunk(ar_ptr, victim, nb);
2793   return victim;
2806 1. free(0) has no effect.
2808 2. If the chunk was allocated via mmap, it is released via munmap().
2810 3. If a returned chunk borders the current high end of memory,
2811 it is consolidated into the top, and if the total unused
2812 topmost memory exceeds the trim threshold, malloc_trim is
2815 4. Other chunks are consolidated as they arrive, and
2816 placed in corresponding bins. (This includes the case of
2817 consolidating with the current `last_remainder').
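/* Usage sketch of the rules above (illustrative only; the 2Mb request
   is assumed to exceed the default mmap_threshold): */
#if 0
#include <stdlib.h>

static void
free_rules_example (void)
{
  char *small = malloc (100);            /* served from an arena */
  char *big = malloc (2 * 1024 * 1024);  /* large enough to be mmap()ed */
  free (0);      /* rule 1: no effect */
  free (big);    /* rule 2: released via munmap() right away */
  free (small);  /* rules 3/4: consolidated and binned, maybe trimmed */
}
#endif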
2823 void fREe(Void_t* mem)
2825 void fREe(mem) Void_t* mem;
2828   arena *ar_ptr;
2829   mchunkptr p;   /* chunk corresponding to mem */
2831 #if defined(_LIBC) || defined(MALLOC_HOOKS)
2832 if (__free_hook != NULL) {
2834 (*__free_hook)(mem, __builtin_return_address (0));
2836 (*__free_hook)(mem);
2842   if (mem == 0) /* free(0) has no effect */
2843     return;
2845   p = mem2chunk(mem);
2847 #if HAVE_MMAP
2848   if (chunk_is_mmapped(p)) /* release mmapped memory. */
2849   {
2850     munmap_chunk(p);
2851     return;
2852   }
2853 #endif
2855 ar_ptr = arena_for_ptr(p);
2856 #if THREAD_STATS
2857   if(!mutex_trylock(&ar_ptr->mutex))
2858     ++(ar_ptr->stat_lock_direct);
2859   else {
2860     (void)mutex_lock(&ar_ptr->mutex);
2861     ++(ar_ptr->stat_lock_wait);
2862   }
2863 #else
2864   (void)mutex_lock(&ar_ptr->mutex);
2865 #endif
2866 chunk_free(ar_ptr, p);
2867 (void)mutex_unlock(&ar_ptr->mutex);
2872 chunk_free(arena *ar_ptr, mchunkptr p)
2874 chunk_free(ar_ptr, p) arena *ar_ptr; mchunkptr p;
2877 INTERNAL_SIZE_T hd = p->size; /* its head field */
2878 INTERNAL_SIZE_T sz; /* its size */
2879 int idx; /* its bin index */
2880 mchunkptr next; /* next contiguous chunk */
2881 INTERNAL_SIZE_T nextsz; /* its size */
2882 INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */
2883 mchunkptr bck; /* misc temp for linking */
2884 mchunkptr fwd; /* misc temp for linking */
2885 int islr; /* track whether merging with last_remainder */
2887 check_inuse_chunk(ar_ptr, p);
2889 sz = hd & ~PREV_INUSE;
2890 next = chunk_at_offset(p, sz);
2891 nextsz = chunksize(next);
2893   if (next == top(ar_ptr))                   /* merge with top */
2894   {
2895     sz += nextsz;
2897     if (!(hd & PREV_INUSE))                  /* consolidate backward */
2898     {
2899       prevsz = p->prev_size;
2900       p = chunk_at_offset(p, -prevsz);
2901       sz += prevsz;
2902       unlink(p, bck, fwd);
2903     }
2905     set_head(p, sz | PREV_INUSE);
2906     top(ar_ptr) = p;
2909     if(ar_ptr == &main_arena) {
2911       if ((unsigned long)(sz) >= (unsigned long)trim_threshold)
2912         main_trim(top_pad);
2913     } else {
2915       heap_info *heap = heap_for_ptr(p);
2917       assert(heap->ar_ptr == ar_ptr);
2919       /* Try to get rid of completely empty heaps, if possible. */
2920       if((unsigned long)(sz) >= (unsigned long)trim_threshold ||
2921          p == chunk_at_offset(heap, sizeof(*heap)))
2922         heap_trim(heap, top_pad);
2923     }
2924     return;
2925   }
2928   islr = 0;
2930   if (!(hd & PREV_INUSE))                    /* consolidate backward */
2931   {
2932     prevsz = p->prev_size;
2933     p = chunk_at_offset(p, -prevsz);
2934     sz += prevsz;
2936     if (p->fd == last_remainder(ar_ptr))     /* keep as last_remainder */
2937       islr = 1;
2938     else
2939       unlink(p, bck, fwd);
2940   }
2942   if (!(inuse_bit_at_offset(next, nextsz)))  /* consolidate forward */
2943   {
2944     sz += nextsz;
2946     if (!islr && next->fd == last_remainder(ar_ptr))
2947                                              /* re-insert last_remainder */
2948     {
2949       islr = 1;
2950       link_last_remainder(ar_ptr, p);
2951     }
2952     else
2953       unlink(next, bck, fwd);
2955     next = chunk_at_offset(p, sz);
2956   }
2957   else
2958     set_head(next, nextsz);                  /* clear inuse bit */
2960 set_head(p, sz | PREV_INUSE);
2961 next->prev_size = sz;
2962   if(!islr)
2963     frontlink(ar_ptr, p, sz, idx, bck, fwd);
2966 /* Check whether the heap containing top can go away now. */
2967 if(next->size < MINSIZE &&
2968 (unsigned long)sz > trim_threshold &&
2969 ar_ptr != &main_arena) { /* fencepost */
2970 heap_info* heap = heap_for_ptr(top(ar_ptr));
2972 if(top(ar_ptr) == chunk_at_offset(heap, sizeof(*heap)) &&
2973 heap->prev == heap_for_ptr(p))
2974 heap_trim(heap, top_pad);
2987 Chunks that were obtained via mmap cannot be extended or shrunk
2988 unless HAVE_MREMAP is defined, in which case mremap is used.
2989 Otherwise, if their reallocation is for additional space, they are
2990 copied. If for less, they are just left alone.
2992 Otherwise, if the reallocation is for additional space, and the
2993 chunk can be extended, it is, else a malloc-copy-free sequence is
2994 taken. There are several different ways that a chunk could be
2995 extended. All are tried:
2997 * Extending forward into following adjacent free chunk.
2998 * Shifting backwards, joining preceding adjacent space
2999 * Both shifting backwards and extending forward.
3000 * Extending into newly sbrked space
3002 Unless the #define REALLOC_ZERO_BYTES_FREES is set, realloc with a
3003 size argument of zero (re)allocates a minimum-sized chunk.
3005 If the reallocation is for less space, and the new request is for
3006 a `small' (<512 bytes) size, then the newly unused space is lopped
3007 off and freed.
3009 The old unix realloc convention of allowing the last-free'd chunk
3010 to be used as an argument to realloc is no longer supported.
3011 I don't know of any programs still relying on this feature,
3012 and allowing it would also allow too many other incorrect
3013 usages of realloc to be sensible.
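/* Usage sketch of the conventions above (illustrative only;
   append_bytes is a hypothetical helper): */
#if 0
#include <stdlib.h>
#include <string.h>

static char *
append_bytes (char *buf, size_t oldlen, const char *src, size_t n)
{
  /* realloc(NULL, n) behaves like malloc(n); on failure the old block
     is left untouched, so keep the old pointer until the call
     succeeds. */
  char *tmp = realloc (buf, oldlen + n);
  if (tmp == 0)
    return 0;                /* buf is still valid and unchanged */
  memcpy (tmp + oldlen, src, n);
  return tmp;
}
#endif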
3020 Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
3022 Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
3025   arena *ar_ptr;
3026   INTERNAL_SIZE_T nb;      /* padded request size */
3028 mchunkptr oldp; /* chunk corresponding to oldmem */
3029 INTERNAL_SIZE_T oldsize; /* its size */
3031 mchunkptr newp; /* chunk to return */
3033 #if defined(_LIBC) || defined(MALLOC_HOOKS)
3034 if (__realloc_hook != NULL) {
3038 result = (*__realloc_hook)(oldmem, bytes, __builtin_return_address (0));
3040 result = (*__realloc_hook)(oldmem, bytes);
3046 #ifdef REALLOC_ZERO_BYTES_FREES
3047   if (bytes == 0) { fREe(oldmem); return 0; }
3048 #endif
3050 /* realloc of null is supposed to be same as malloc */
3051 if (oldmem == 0) return mALLOc(bytes);
3053 oldp = mem2chunk(oldmem);
3054 oldsize = chunksize(oldp);
3056 nb = request2size(bytes);
3059   if (chunk_is_mmapped(oldp))
3060   {
3061     Void_t* newmem;
3063 #if HAVE_MREMAP
3064     newp = mremap_chunk(oldp, nb);
3065     if(newp) return chunk2mem(newp);
3066 #endif
3067     /* Note the extra SIZE_SZ overhead. */
3068     if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
3069     /* Must alloc, copy, free. */
3070     newmem = mALLOc(bytes);
3071     if (newmem == 0) return 0; /* propagate failure */
3072     MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
3073     munmap_chunk(oldp);
3074     return newmem;
3075   }
3078 ar_ptr = arena_for_ptr(oldp);
3079 #if THREAD_STATS
3080   if(!mutex_trylock(&ar_ptr->mutex))
3081     ++(ar_ptr->stat_lock_direct);
3082   else {
3083     (void)mutex_lock(&ar_ptr->mutex);
3084     ++(ar_ptr->stat_lock_wait);
3085   }
3086 #else
3087   (void)mutex_lock(&ar_ptr->mutex);
3088 #endif
3091 /* As in malloc(), remember this arena for the next allocation. */
3092 tsd_setspecific(arena_key, (Void_t *)ar_ptr);
3095 newp = chunk_realloc(ar_ptr, oldp, oldsize, nb);
3097 (void)mutex_unlock(&ar_ptr->mutex);
3098 return newp ? chunk2mem(newp) : NULL;
3103 chunk_realloc(arena* ar_ptr, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
3106 chunk_realloc(ar_ptr, oldp, oldsize, nb)
3107 arena* ar_ptr; mchunkptr oldp; INTERNAL_SIZE_T oldsize, nb;
3110 mchunkptr newp = oldp; /* chunk to return */
3111 INTERNAL_SIZE_T newsize = oldsize; /* its size */
3113 mchunkptr next; /* next contiguous chunk after oldp */
3114 INTERNAL_SIZE_T nextsize; /* its size */
3116 mchunkptr prev; /* previous contiguous chunk before oldp */
3117 INTERNAL_SIZE_T prevsize; /* its size */
3119 mchunkptr remainder; /* holds split off extra space from newp */
3120 INTERNAL_SIZE_T remainder_size; /* its size */
3122 mchunkptr bck; /* misc temp for linking */
3123 mchunkptr fwd; /* misc temp for linking */
3125 check_inuse_chunk(ar_ptr, oldp);
3127 if ((long)(oldsize) < (long)(nb))
3130 /* Try expanding forward */
3132 next = chunk_at_offset(oldp, oldsize);
3133 if (next == top(ar_ptr) || !inuse(next))
3135 nextsize = chunksize(next);
3137 /* Forward into top only if a remainder */
3138 if (next == top(ar_ptr))
3140 if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE))
3142 newsize += nextsize;
3143 top(ar_ptr) = chunk_at_offset(oldp, nb);
3144 set_head(top(ar_ptr), (newsize - nb) | PREV_INUSE);
3145           set_head_size(oldp, nb);
3146           return oldp;
3150 /* Forward into next chunk */
3151       else if (((long)(nextsize + newsize) >= (long)(nb)))
3152       {
3153         unlink(next, bck, fwd);
3154         newsize += nextsize;
3155         goto split;
3156       }
3164 /* Try shifting backwards. */
3166 if (!prev_inuse(oldp))
3168 prev = prev_chunk(oldp);
3169 prevsize = chunksize(prev);
3171 /* try forward + backward first to save a later consolidation */
3176 if (next == top(ar_ptr))
3178 if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE))
3180             unlink(prev, bck, fwd);
3181             newp = prev;
3182             newsize += prevsize + nextsize;
3183             MALLOC_COPY(chunk2mem(newp), chunk2mem(oldp), oldsize - SIZE_SZ);
3184             top(ar_ptr) = chunk_at_offset(newp, nb);
3185             set_head(top(ar_ptr), (newsize - nb) | PREV_INUSE);
3186             set_head_size(newp, nb);
3187             return newp;
3191 /* into next chunk */
3192 else if (((long)(nextsize + prevsize + newsize) >= (long)(nb)))
3194           unlink(next, bck, fwd);
3195           unlink(prev, bck, fwd);
3196           newp = prev;
3197           newsize += nextsize + prevsize;
3198           MALLOC_COPY(chunk2mem(newp), chunk2mem(oldp), oldsize - SIZE_SZ);
3199           goto split;
3204 if (prev != 0 && (long)(prevsize + newsize) >= (long)nb)
3206         unlink(prev, bck, fwd);
3207         newp = prev;
3208         newsize += prevsize;
3209         MALLOC_COPY(chunk2mem(newp), chunk2mem(oldp), oldsize - SIZE_SZ);
3210         goto split;
3216     newp = chunk_alloc (ar_ptr, nb);
3217     if (newp == 0) {
3219       /* Maybe the failure is due to running out of mmapped areas. */
3220       if (ar_ptr != &main_arena) {
3221         (void)mutex_lock(&main_arena.mutex);
3222         newp = chunk_alloc(&main_arena, nb);
3223         (void)mutex_unlock(&main_arena.mutex);
3224       }
3225       if (newp == 0) /* propagate failure */
3226         return 0;
3227     }
3229 /* Avoid copy if newp is next chunk after oldp. */
3230 /* (This can only happen when new chunk is sbrk'ed.) */
3232     if ( newp == next_chunk(oldp))
3233     {
3234       newsize += chunksize(newp);
3235       newp = oldp;
3236       goto split;
3237     }
3239     /* Otherwise copy, free, and exit */
3240     MALLOC_COPY(chunk2mem(newp), chunk2mem(oldp), oldsize - SIZE_SZ);
3241     chunk_free(ar_ptr, oldp);
3242     return newp;
3246 split: /* split off extra room in old or expanded chunk */
3248 if (newsize - nb >= MINSIZE) /* split off remainder */
3250 remainder = chunk_at_offset(newp, nb);
3251 remainder_size = newsize - nb;
3252 set_head_size(newp, nb);
3253 set_head(remainder, remainder_size | PREV_INUSE);
3254 set_inuse_bit_at_offset(remainder, remainder_size);
3255 chunk_free(ar_ptr, remainder);
3257   else
3258   {
3259     set_head_size(newp, newsize);
3260     set_inuse_bit_at_offset(newp, newsize);
3261   }
3263   check_inuse_chunk(ar_ptr, newp);
3264   return newp;
3274 memalign requests more than enough space from malloc, finds a spot
3275 within that chunk that meets the alignment request, and then
3276 possibly frees the leading and trailing space.
3278 The alignment argument must be a power of two. This property is not
3279 checked by memalign, so misuse may result in random runtime errors.
3281 8-byte alignment is guaranteed by normal malloc calls, so don't
3282 bother calling memalign with an argument of 8 or less.
3284 Overreliance on memalign is a sure way to fragment space.
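/* Usage sketch (illustrative only): 64 is a hypothetical cache-line
   size; any power of two works, and alignments of 8 or less are already
   guaranteed by plain malloc. */
#if 0
#include <stdlib.h>

static void *
cache_aligned_buffer (size_t n)
{
  return memalign (64, n);
}
#endif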
3290 Void_t* mEMALIGn(size_t alignment, size_t bytes)
3292 Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
3295   arena *ar_ptr;
3296   INTERNAL_SIZE_T nb;      /* padded request size */
3297   mchunkptr p;
3299 #if defined(_LIBC) || defined(MALLOC_HOOKS)
3300 if (__memalign_hook != NULL) {
3304 result = (*__memalign_hook)(alignment, bytes,
3305 __builtin_return_address (0));
3307 result = (*__memalign_hook)(alignment, bytes);
3313 /* If need less alignment than we give anyway, just relay to malloc */
3315 if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);
3317 /* Otherwise, ensure that it is at least a minimum chunk size */
3319 if (alignment < MINSIZE) alignment = MINSIZE;
3321 nb = request2size(bytes);
3322   arena_get(ar_ptr, nb + alignment + MINSIZE);
3323   if(!ar_ptr)
3324     return 0;
3325 p = chunk_align(ar_ptr, nb, alignment);
3326   (void)mutex_unlock(&ar_ptr->mutex);
3327   if(!p) {
3328     /* Maybe the failure is due to running out of mmapped areas. */
3329     if(ar_ptr != &main_arena) {
3330       (void)mutex_lock(&main_arena.mutex);
3331       p = chunk_align(&main_arena, nb, alignment);
3332       (void)mutex_unlock(&main_arena.mutex);
3333     }
3334     if(!p) return 0;
3335   }
3336   return chunk2mem(p);
3341 chunk_align(arena* ar_ptr, INTERNAL_SIZE_T nb, size_t alignment)
3343 chunk_align(ar_ptr, nb, alignment)
3344 arena* ar_ptr; INTERNAL_SIZE_T nb; size_t alignment;
3347 char* m; /* memory returned by malloc call */
3348 mchunkptr p; /* corresponding chunk */
3349 char* brk; /* alignment point within p */
3350 mchunkptr newp; /* chunk to return */
3351 INTERNAL_SIZE_T newsize; /* its size */
3352   INTERNAL_SIZE_T leadsize;   /* leading space before alignment point */
3353 mchunkptr remainder; /* spare room at end to split off */
3354 long remainder_size; /* its size */
3356 /* Call chunk_alloc with worst case padding to hit alignment. */
3357 p = chunk_alloc(ar_ptr, nb + alignment + MINSIZE);
3358   if (p == 0)
3359     return 0; /* propagate failure */
3361   m = (char*)chunk2mem(p);
3363   if ((((unsigned long)(m)) % alignment) == 0) /* aligned */
3366 if(chunk_is_mmapped(p)) {
3367 return p; /* nothing more to do */
3371 else /* misaligned */
3374 Find an aligned spot inside chunk.
3375 Since we need to give back leading space in a chunk of at
3376 least MINSIZE, if the first calculation places us at
3377 a spot with less than MINSIZE leader, we can move to the
3378 next aligned spot -- we've allocated enough total room so that
3379 this is always possible.
3382 brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) & -alignment);
3383 if ((long)(brk - (char*)(p)) < (long)MINSIZE) brk += alignment;
3385 newp = (mchunkptr)brk;
3386 leadsize = brk - (char*)(p);
3387 newsize = chunksize(p) - leadsize;
3390     if(chunk_is_mmapped(p))
3391     {
3392       newp->prev_size = p->prev_size + leadsize;
3393       set_head(newp, newsize|IS_MMAPPED);
3394       return newp;
3395     }
3398     /* give back leader, use the rest */
3400     set_head(newp, newsize | PREV_INUSE);
3401     set_inuse_bit_at_offset(newp, newsize);
3402     set_head_size(p, leadsize);
3403     chunk_free(ar_ptr, p);
3404     p = newp;
3406 assert (newsize>=nb && (((unsigned long)(chunk2mem(p))) % alignment) == 0);
3409 /* Also give back spare room at the end */
3411 remainder_size = chunksize(p) - nb;
3413 if (remainder_size >= (long)MINSIZE)
3415 remainder = chunk_at_offset(p, nb);
3416 set_head(remainder, remainder_size | PREV_INUSE);
3417 set_head_size(p, nb);
3418 chunk_free(ar_ptr, remainder);
3421   check_inuse_chunk(ar_ptr, p);
3422   return p;
3429 valloc just invokes memalign with alignment argument equal
3430 to the page size of the system (or as near to this as can
3431 be figured out from all the includes/defines above.)
3435 Void_t* vALLOc(size_t bytes)
3437 Void_t* vALLOc(bytes) size_t bytes;
3440 return mEMALIGn (malloc_getpagesize, bytes);
3444 pvalloc just invokes valloc for the nearest pagesize
3445   that will accommodate the request
3450 Void_t* pvALLOc(size_t bytes)
3452 Void_t* pvALLOc(bytes) size_t bytes;
3455 size_t pagesize = malloc_getpagesize;
3456 return mEMALIGn (pagesize, (bytes + pagesize - 1) & ~(pagesize - 1));
3461 calloc calls chunk_alloc, then zeroes out the allocated chunk.
3466 Void_t* cALLOc(size_t n, size_t elem_size)
3468 Void_t* cALLOc(n, elem_size) size_t n; size_t elem_size;
3471   arena *ar_ptr;
3472   mchunkptr p, oldtop;
3473   INTERNAL_SIZE_T sz, csz, oldtopsize;
3474   Void_t* mem;
3476 #if defined(_LIBC) || defined(MALLOC_HOOKS)
3477 if (__malloc_hook != NULL) {
3480 mem = (*__malloc_hook)(sz, __builtin_return_address (0));
3482 mem = (*__malloc_hook)(sz);
3487 return memset(mem, 0, sz);
3489 while(sz > 0) ((char*)mem)[--sz] = 0; /* rather inefficient */
3495 sz = request2size(n * elem_size);
3496   arena_get(ar_ptr, sz);
3497   if(!ar_ptr)
3498     return 0;
3500 /* check if expand_top called, in which case don't need to clear */
3502 oldtop = top(ar_ptr);
3503 oldtopsize = chunksize(top(ar_ptr));
3505 p = chunk_alloc (ar_ptr, sz);
3507 /* Only clearing follows, so we can unlock early. */
3508 (void)mutex_unlock(&ar_ptr->mutex);
3511 /* Maybe the failure is due to running out of mmapped areas. */
3512 if(ar_ptr != &main_arena) {
3513 (void)mutex_lock(&main_arena.mutex);
3514 p = chunk_alloc(&main_arena, sz);
3515 (void)mutex_unlock(&main_arena.mutex);
3517 if (p == 0) return 0;
3521 /* Two optional cases in which clearing not necessary */
3524 if (chunk_is_mmapped(p)) return mem;
3530   if (p == oldtop && csz > oldtopsize) {
3531     /* clear only the bytes from non-freshly-sbrked memory */
3532     csz = oldtopsize;
3533   }
3536   MALLOC_ZERO(mem, csz - SIZE_SZ);
3537   return mem;
3542 cfree just calls free. It is needed/defined on some systems
3543 that pair it with calloc, presumably for odd historical reasons.
3549 void cfree(Void_t *mem)
3551 void cfree(mem) Void_t *mem;
3552 {
3553   fREe(mem);
3554 }
3562 Malloc_trim gives memory back to the system (via negative
3563 arguments to sbrk) if there is unused memory at the `high' end of
3564 the malloc pool. You can call this after freeing large blocks of
3565 memory to potentially reduce the system-level memory requirements
3566 of a program. However, it cannot guarantee to reduce memory. Under
3567 some allocation patterns, some large free blocks of memory will be
3568   locked between two used chunks, so they cannot be given back to
3569   the system.
3571 The `pad' argument to malloc_trim represents the amount of free
3572 trailing space to leave untrimmed. If this argument is zero,
3573 only the minimum amount of memory to maintain internal data
3574 structures will be left (one page or less). Non-zero arguments
3575 can be supplied to maintain enough trailing space to service
3576   future expected allocations without having to re-obtain memory
3577   from the system.
3579 Malloc_trim returns 1 if it actually released any memory, else 0.
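/* Usage sketch (illustrative only; release_after_burst is a
   hypothetical helper): */
#if 0
#include <malloc.h>
#include <stdlib.h>

static void
release_after_burst (void)
{
  char *big = malloc (8 * 1024 * 1024);
  /* ... use and then release a large block ... */
  free (big);
  /* Return unused trailing memory to the system, keeping no extra pad;
     the return value is 1 only if memory was actually released. */
  (void) malloc_trim (0);
}
#endif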
3584 int mALLOC_TRIm(size_t pad)
3586 int mALLOC_TRIm(pad) size_t pad;
3591 (void)mutex_lock(&main_arena.mutex);
3592 res = main_trim(pad);
3593 (void)mutex_unlock(&main_arena.mutex);
3597 /* Trim the main arena. */
3601 main_trim(size_t pad)
3603 main_trim(pad) size_t pad;
3606 mchunkptr top_chunk; /* The current top chunk */
3607 long top_size; /* Amount of top-most memory */
3608 long extra; /* Amount to release */
3609 char* current_brk; /* address returned by pre-check sbrk call */
3610 char* new_brk; /* address returned by negative sbrk call */
3612 unsigned long pagesz = malloc_getpagesize;
3614 top_chunk = top(&main_arena);
3615 top_size = chunksize(top_chunk);
3616 extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
3618   if (extra < (long)pagesz) /* Not enough memory to release */
3619     return 0;
3621 /* Test to make sure no one else called sbrk */
3622 current_brk = (char*)(MORECORE (0));
3623 if (current_brk != (char*)(top_chunk) + top_size)
3624 return 0; /* Apparently we don't own memory; must fail */
3626 new_brk = (char*)(MORECORE (-extra));
3628 #if defined(_LIBC) || defined(MALLOC_HOOKS)
3629 /* Call the `morecore' hook if necessary. */
3630 if (__after_morecore_hook)
3631 (*__after_morecore_hook) ();
3634 if (new_brk == (char*)(MORECORE_FAILURE)) { /* sbrk failed? */
3635 /* Try to figure out what we have */
3636 current_brk = (char*)(MORECORE (0));
3637 top_size = current_brk - (char*)top_chunk;
3638     if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */
3639     {
3640       sbrked_mem = current_brk - sbrk_base;
3641       set_head(top_chunk, top_size | PREV_INUSE);
3642     }
3643     check_chunk(&main_arena, top_chunk);
3644     return 0;
3645   }
3646 sbrked_mem -= extra;
3648 /* Success. Adjust top accordingly. */
3649 set_head(top_chunk, (top_size - extra) | PREV_INUSE);
3650   check_chunk(&main_arena, top_chunk);
3651   return 1;
3658 heap_trim(heap_info *heap, size_t pad)
3660 heap_trim(heap, pad) heap_info *heap; size_t pad;
3663 unsigned long pagesz = malloc_getpagesize;
3664 arena *ar_ptr = heap->ar_ptr;
3665 mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
3666 heap_info *prev_heap;
3667 long new_size, top_size, extra;
3669 /* Can this heap go away completely ? */
3670   while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
3671     prev_heap = heap->prev;
3672     p = chunk_at_offset(prev_heap, prev_heap->size - (MINSIZE-2*SIZE_SZ));
3673     assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
3674     p = prev_chunk(p);
3675     new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ);
3676     assert(new_size>0 && new_size<(long)(2*MINSIZE));
3677     if(!prev_inuse(p))
3678       new_size += p->prev_size;
3679     assert(new_size>0 && new_size<HEAP_MAX_SIZE);
3680     if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
3681       break;
3682     ar_ptr->size -= heap->size;
3683     delete_heap(heap);
3684     heap = prev_heap;
3685     if(!prev_inuse(p)) { /* consolidate backward */
3686       p = prev_chunk(p);
3687       unlink(p, bck, fwd);
3688     }
3689 assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
3690 assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
3691 top(ar_ptr) = top_chunk = p;
3692 set_head(top_chunk, new_size | PREV_INUSE);
3693 check_chunk(ar_ptr, top_chunk);
3695 top_size = chunksize(top_chunk);
3696 extra = ((top_size - pad - MINSIZE + (pagesz-1))/pagesz - 1) * pagesz;
3697   if(extra < (long)pagesz)
3698     return 0;
3699   /* Try to shrink. */
3700   if(grow_heap(heap, -extra) != 0)
3701     return 0;
3702 ar_ptr->size -= extra;
3704 /* Success. Adjust top accordingly. */
3705 set_head(top_chunk, (top_size - extra) | PREV_INUSE);
3706   check_chunk(ar_ptr, top_chunk);
3707   return 1;
3717 This routine tells you how many bytes you can actually use in an
3718 allocated chunk, which may be more than you requested (although
3719 often not). You can use this many bytes without worrying about
3720 overwriting other allocated objects. Not a particularly great
3721 programming practice, but still sometimes useful.
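/* Usage sketch (illustrative only; fill_to_capacity is a hypothetical
   helper): */
#if 0
#include <malloc.h>
#include <stdlib.h>
#include <string.h>

static void
fill_to_capacity (void)
{
  char *p = malloc (100);
  if (p) {
    /* The full reported capacity may legally be used; it is often
       larger than the 100 bytes requested. */
    size_t cap = malloc_usable_size (p);
    memset (p, 0, cap);
    free (p);
  }
}
#endif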
3726 size_t mALLOC_USABLE_SIZe(Void_t* mem)
3728 size_t mALLOC_USABLE_SIZe(mem) Void_t* mem;
3733   mchunkptr p;
3735   if (mem == 0) return 0;
3736   p = mem2chunk(mem);
3738   if(!chunk_is_mmapped(p))
3739   {
3740     if (!inuse(p)) return 0;
3741     check_inuse_chunk(arena_for_ptr(mem), p);
3742     return chunksize(p) - SIZE_SZ;
3743   }
3744   return chunksize(p) - 2*SIZE_SZ;
3751 /* Utility to update mallinfo for malloc_stats() and mallinfo() */
3755 malloc_update_mallinfo(arena *ar_ptr, struct mallinfo *mi)
3757 malloc_update_mallinfo(ar_ptr, mi) arena *ar_ptr; struct mallinfo *mi;
3763   int i, navail;
3764   mbinptr b;
3765   mchunkptr p, q;
3766   INTERNAL_SIZE_T avail;
3768 (void)mutex_lock(&ar_ptr->mutex);
3769 avail = chunksize(top(ar_ptr));
3770 navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0;
3772 for (i = 1; i < NAV; ++i)
3774 b = bin_at(ar_ptr, i);
3775 for (p = last(b); p != b; p = p->bk)
3778 check_free_chunk(ar_ptr, p);
3779       for (q = next_chunk(p);
3780            q != top(ar_ptr) && inuse(q) && (long)chunksize(q) > 0;
3781            q = next_chunk(q))
3782         check_inuse_chunk(ar_ptr, q);
3784       avail += chunksize(p);
3785       navail++;
3789 mi->arena = ar_ptr->size;
3790 mi->ordblks = navail;
3791 mi->uordblks = ar_ptr->size - avail;
3792 mi->fordblks = avail;
3793 mi->hblks = n_mmaps;
3794 mi->hblkhd = mmapped_mem;
3795 mi->keepcost = chunksize(top(ar_ptr));
3797 (void)mutex_unlock(&ar_ptr->mutex);
3800 #if !defined(NO_THREADS) && MALLOC_DEBUG > 1
3802 /* Print the complete contents of a single heap to stderr. */
3806 dump_heap(heap_info *heap)
3808 dump_heap(heap) heap_info *heap;
3812   char *ptr;
3813   mchunkptr p;
3814   fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
3815 ptr = (heap->ar_ptr != (arena*)(heap+1)) ?
3816 (char*)(heap + 1) : (char*)(heap + 1) + sizeof(arena);
3817 p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
3818 ~MALLOC_ALIGN_MASK);
3819   for(;;) {
3820     fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
3821     if(p == top(heap->ar_ptr)) {
3822       fprintf(stderr, " (top)\n");
3823       break;
3824     } else if(p->size == (0|PREV_INUSE)) {
3825       fprintf(stderr, " (fence)\n");
3826       break;
3827     }
3828     fprintf(stderr, "\n");
3829     p = next_chunk(p);
3830   }
3841 For all arenas separately and in total, prints on stderr the
3842 amount of space obtained from the system, and the current number
3843 of bytes allocated via malloc (or realloc, etc) but not yet
3844 freed. (Note that this is the number of bytes allocated, not the
3845 number requested. It will be larger than the number requested
3846 because of alignment and bookkeeping overhead.) When not compiled
3847 for multiple threads, the maximum amount of allocated memory
3848 (which may be more than current if malloc_trim and/or munmap got
3849 called) is also reported. When using mmap(), prints the maximum
3850 number of simultaneous mmap regions used, too.
3856   int i;
3857   arena *ar_ptr;
3858   struct mallinfo mi;
3859   unsigned int in_use_b = mmapped_mem, system_b = in_use_b;
3861 long stat_lock_direct = 0, stat_lock_loop = 0, stat_lock_wait = 0;
3864 for(i=0, ar_ptr = &main_arena;; i++) {
3865 malloc_update_mallinfo(ar_ptr, &mi);
3866 fprintf(stderr, "Arena %d:\n", i);
3867 fprintf(stderr, "system bytes = %10u\n", (unsigned int)mi.arena);
3868 fprintf(stderr, "in use bytes = %10u\n", (unsigned int)mi.uordblks);
3869 system_b += mi.arena;
3870 in_use_b += mi.uordblks;
3872 stat_lock_direct += ar_ptr->stat_lock_direct;
3873 stat_lock_loop += ar_ptr->stat_lock_loop;
3874 stat_lock_wait += ar_ptr->stat_lock_wait;
3876 #if !defined(NO_THREADS) && MALLOC_DEBUG > 1
3877 if(ar_ptr != &main_arena) {
3878     heap_info *heap;
3879     (void)mutex_lock(&ar_ptr->mutex);
3880 heap = heap_for_ptr(top(ar_ptr));
3881 while(heap) { dump_heap(heap); heap = heap->prev; }
3882 (void)mutex_unlock(&ar_ptr->mutex);
3885 ar_ptr = ar_ptr->next;
3886 if(ar_ptr == &main_arena) break;
3888 #if HAVE_MMAP
3889   fprintf(stderr, "Total (incl. mmap):\n");
3890 #else
3891   fprintf(stderr, "Total:\n");
3892 #endif
3893 fprintf(stderr, "system bytes = %10u\n", system_b);
3894 fprintf(stderr, "in use bytes = %10u\n", in_use_b);
3896 fprintf(stderr, "max system bytes = %10u\n", (unsigned int)max_total_mem);
3899 fprintf(stderr, "max mmap regions = %10u\n", (unsigned int)max_n_mmaps);
3900 fprintf(stderr, "max mmap bytes = %10lu\n", max_mmapped_mem);
3903 fprintf(stderr, "heaps created = %10d\n", stat_n_heaps);
3904 fprintf(stderr, "locked directly = %10ld\n", stat_lock_direct);
3905 fprintf(stderr, "locked in loop = %10ld\n", stat_lock_loop);
3906 fprintf(stderr, "locked waiting = %10ld\n", stat_lock_wait);
3907 fprintf(stderr, "locked total = %10ld\n",
3908 stat_lock_direct + stat_lock_loop + stat_lock_wait);
3913 mallinfo returns a copy of updated current mallinfo.
3914 The information reported is for the arena last used by the thread.
3917 struct mallinfo mALLINFo()
3919   struct mallinfo mi;
3920   Void_t *vptr = NULL;
3923   tsd_getspecific(arena_key, vptr);
3925   malloc_update_mallinfo((vptr ? (arena*)vptr : &main_arena), &mi);
3926   return mi;
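/* Usage sketch (illustrative only; print_arena_usage is a hypothetical
   helper, and the struct mallinfo fields shown are the ones filled in
   by malloc_update_mallinfo() above): */
#if 0
#include <malloc.h>
#include <stdio.h>

static void
print_arena_usage (void)
{
  struct mallinfo mi = mallinfo ();   /* arena last used by this thread */
  printf ("total from system: %d\n", mi.arena);
  printf ("in use:            %d\n", mi.uordblks);
  printf ("free:              %d\n", mi.fordblks);
}
#endif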
3935 mallopt is the general SVID/XPG interface to tunable parameters.
3936 The format is to provide a (parameter-number, parameter-value) pair.
3937 mallopt then sets the corresponding parameter to the argument
3938 value if it can (i.e., so long as the value is meaningful),
3939 and returns 1 if successful else 0.
3941 See descriptions of tunable parameters above.
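/* Usage sketch (illustrative only; the values are arbitrary examples,
   and mallopt returns 1 on success, 0 if a value is rejected): */
#if 0
#include <malloc.h>

static void
tune_allocator (void)
{
  mallopt (M_TRIM_THRESHOLD, 256 * 1024);
  mallopt (M_TOP_PAD,        64 * 1024);
  mallopt (M_MMAP_THRESHOLD, 512 * 1024);
  mallopt (M_MMAP_MAX,       256);
}
#endif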
3946 int mALLOPt(int param_number, int value)
3948 int mALLOPt(param_number, value) int param_number; int value;
3951 switch(param_number)
3953 case M_TRIM_THRESHOLD:
3954 trim_threshold = value; return 1;
3955     case M_TOP_PAD:
3956       top_pad = value; return 1;
3957 case M_MMAP_THRESHOLD:
3959 /* Forbid setting the threshold too high. */
3960 if((unsigned long)value > HEAP_MAX_SIZE/2) return 0;
3962 mmap_threshold = value; return 1;
3963     case M_MMAP_MAX:
3964 #if HAVE_MMAP
3965       n_mmaps_max = value; return 1;
3966 #else
3967       if (value != 0) return 0; else n_mmaps_max = value; return 1;
3968 #endif
3969 case M_CHECK_ACTION:
3970       check_action = value; return 1;
3972     default:
3973       return 0;
3979 /* Get/set state: malloc_get_state() records the current state of all
3980 malloc variables (_except_ for the actual heap contents and `hook'
3981 function pointers) in a system dependent, opaque data structure.
3982 This data structure is dynamically allocated and can be free()d
3983 after use. malloc_set_state() restores the state of all malloc
3984 variables to the previously obtained state. This is especially
3985 useful when using this malloc as part of a shared library, and when
3986 the heap contents are saved/restored via some other method. The
3987 primary example for this is GNU Emacs with its `dumping' procedure.
3988 `Hook' function pointers are never saved or restored by these
3989 functions. */
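/* Usage sketch of the dump/undump pattern described above (illustrative
   only; the external mechanism that preserves the heap contents between
   the two calls is outside the scope of this example): */
#if 0
#include <malloc.h>
#include <stdlib.h>

static void
snapshot_and_restore (void)
{
  void *state = malloc_get_state ();   /* opaque, heap-allocated */
  if (state == 0)
    return;
  /* ... heap contents saved/restored by some other method here ... */
  if (malloc_set_state (state) == 0)   /* 0 means success */
    free (state);                      /* state can be free()d after use */
}
#endif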
3991 #define MALLOC_STATE_MAGIC 0x444c4541l
3992 #define MALLOC_STATE_VERSION (0*0x100l + 0l) /* major*0x100 + minor */
3994 struct malloc_state {
3995   long          magic;
3996   long          version;
3997 mbinptr av[NAV * 2 + 2];
3998   char*         sbrk_base;
3999   int           sbrked_mem_bytes;
4000 unsigned long trim_threshold;
4001 unsigned long top_pad;
4002 unsigned int n_mmaps_max;
4003   unsigned long mmap_threshold;
4004   int           check_action;
4005 unsigned long max_sbrked_mem;
4006 unsigned long max_total_mem;
4007 unsigned int n_mmaps;
4008 unsigned int max_n_mmaps;
4009 unsigned long mmapped_mem;
4010 unsigned long max_mmapped_mem;
4017   struct malloc_state* ms;
4018   int i;
4019   mbinptr b;
4020   mchunkptr victim;
4022 (void)mutex_lock(&main_arena.mutex);
4023   victim = chunk_alloc(&main_arena, request2size(sizeof(*ms)));
4024   if(!victim) {
4025     (void)mutex_unlock(&main_arena.mutex);
4026     return 0;
4027   }
4028   ms = (struct malloc_state*)chunk2mem(victim);
4029 ms->magic = MALLOC_STATE_MAGIC;
4030 ms->version = MALLOC_STATE_VERSION;
4031 ms->av[0] = main_arena.av[0];
4032 ms->av[1] = main_arena.av[1];
4033   for(i=0; i<NAV; i++) {
4034     b = bin_at(&main_arena, i);
4035     if(first(b) == b)
4036       ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin (or initial top) */
4037     else {
4038       ms->av[2*i+2] = first(b);
4039       ms->av[2*i+3] = last(b);
4040     }
4041   }
4042 ms->sbrk_base = sbrk_base;
4043 ms->sbrked_mem_bytes = sbrked_mem;
4044 ms->trim_threshold = trim_threshold;
4045 ms->top_pad = top_pad;
4046 ms->n_mmaps_max = n_mmaps_max;
4047 ms->mmap_threshold = mmap_threshold;
4048 ms->check_action = check_action;
4049 ms->max_sbrked_mem = max_sbrked_mem;
4050 #ifdef NO_THREADS
4051   ms->max_total_mem = max_total_mem;
4052 #else
4053   ms->max_total_mem = 0;
4054 #endif
4055 ms->n_mmaps = n_mmaps;
4056 ms->max_n_mmaps = max_n_mmaps;
4057 ms->mmapped_mem = mmapped_mem;
4058 ms->max_mmapped_mem = max_mmapped_mem;
4059   (void)mutex_unlock(&main_arena.mutex);
4060   return (Void_t*)ms;
4065 mALLOC_SET_STATe(Void_t* msptr)
4067 mALLOC_SET_STATe(msptr) Void_t* msptr;
4070   struct malloc_state* ms = (struct malloc_state*)msptr;
4071   int i;
4072   mbinptr b;
4075 if(ms->magic != MALLOC_STATE_MAGIC) return -1;
4076 /* Must fail if the major version is too high. */
4077 if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
4078 (void)mutex_lock(&main_arena.mutex);
4079 main_arena.av[0] = ms->av[0];
4080 main_arena.av[1] = ms->av[1];
4081 for(i=0; i<NAV; i++) {
4082 b = bin_at(&main_arena, i);
4083     if(ms->av[2*i+2] == 0)
4084       first(b) = last(b) = b;
4085     else {
4086       first(b) = ms->av[2*i+2];
4087       last(b) = ms->av[2*i+3];
4088       if(i > 0) {
4089         /* Make sure the links to the `av'-bins in the heap are correct. */
4090         first(b)->bk = b;
4091         last(b)->fd = b;
4092       }
4093     }
4094   }
4095 sbrk_base = ms->sbrk_base;
4096 sbrked_mem = ms->sbrked_mem_bytes;
4097 trim_threshold = ms->trim_threshold;
4098 top_pad = ms->top_pad;
4099 n_mmaps_max = ms->n_mmaps_max;
4100 mmap_threshold = ms->mmap_threshold;
4101 check_action = ms->check_action;
4102 max_sbrked_mem = ms->max_sbrked_mem;
4103 #ifdef NO_THREADS
4104   max_total_mem = ms->max_total_mem;
4105 #endif
4106 n_mmaps = ms->n_mmaps;
4107 max_n_mmaps = ms->max_n_mmaps;
4108 mmapped_mem = ms->mmapped_mem;
4109 max_mmapped_mem = ms->max_mmapped_mem;
4110 /* add version-dependent code here */
4111   (void)mutex_unlock(&main_arena.mutex);
4112   return 0;
4117 #if defined(_LIBC) || defined(MALLOC_HOOKS)
4119 /* A simple, standard set of debugging hooks. Overhead is `only' one
4120    byte per chunk; still this will catch most cases of double frees or
4121    overruns. */
4123 #define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
4125 /* Convert a pointer to be free()d or realloc()ed to a valid chunk
4126 pointer. If the provided pointer is not valid, return NULL. The
4127 goal here is to avoid crashes, unlike in the MALLOC_DEBUG code. */
4131 mem2chunk_check(Void_t* mem)
4133 mem2chunk_check(mem) Void_t* mem;
4137   mchunkptr p;
4138   INTERNAL_SIZE_T sz;
4139   p = mem2chunk(mem);
4140   if(!aligned_OK(p)) return NULL;
4141 if( (char*)p>=sbrk_base && (char*)p<(sbrk_base+sbrked_mem) ) {
4142 /* Must be a chunk in conventional heap memory. */
4143 if(chunk_is_mmapped(p) ||
4144 ( (sz = chunksize(p)), ((char*)p + sz)>=(sbrk_base+sbrked_mem) ) ||
4145 sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
4146 ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
4147 (long)prev_chunk(p)<(long)sbrk_base ||
4148                             next_chunk(prev_chunk(p))!=p) ))
4149       return NULL;
4150     if(*((unsigned char*)p + sz + (SIZE_SZ-1)) != MAGICBYTE(p))
4151       return NULL;
4152     *((unsigned char*)p + sz + (SIZE_SZ-1)) ^= 0xFF;
4154 unsigned long offset, page_mask = malloc_getpagesize-1;
4156 /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
4157 alignment relative to the beginning of a page. Check this
4159 offset = (unsigned long)mem & page_mask;
4160 if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
4161 offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
4162        offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
4163        offset<0x2000) ||
4164 !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
4165 ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
4166        ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
4167       return NULL;
4168     if(*((unsigned char*)p + sz - 1) != MAGICBYTE(p))
4169       return NULL;
4170     *((unsigned char*)p + sz - 1) ^= 0xFF;
4171   }
4172   return p;
4177 malloc_check(size_t sz, const Void_t *caller)
4180 malloc_check(size_t sz)
4182 malloc_check(sz) size_t sz;
4185   mchunkptr victim;
4187   INTERNAL_SIZE_T nb = request2size(sz + 1);
4189 (void)mutex_lock(&main_arena.mutex);
4190 victim = chunk_alloc(&main_arena, nb);
4191 (void)mutex_unlock(&main_arena.mutex);
4192 if(!victim) return NULL;
4193 nb = chunksize(victim);
4194   if(chunk_is_mmapped(victim))
4195     --nb;
4196   else
4197     nb += SIZE_SZ - 1;
4198   *((unsigned char*)victim + nb) = MAGICBYTE(victim);
4199 return chunk2mem(victim);
4204 free_check(Void_t* mem, const Void_t *caller)
4207 free_check(Void_t* mem)
4209 free_check(mem) Void_t* mem;
4212   mchunkptr p;
4214   if(!mem) return;
4216   (void)mutex_lock(&main_arena.mutex);
4217   p = mem2chunk_check(mem);
4218   if(!p) {
4219     (void)mutex_unlock(&main_arena.mutex);
4220     switch(check_action) {
4221     case 1:
4222       fprintf(stderr, "free(): invalid pointer %lx!\n", (long)(mem));
4223       break;
4224     case 2:
4225       abort();
4226     }
4227     return;
4228   }
4229 #if HAVE_MMAP
4230   if (chunk_is_mmapped(p)) {
4231     (void)mutex_unlock(&main_arena.mutex);
4232     munmap_chunk(p);
4233     return;
4234   }
4235 #endif
4236 #if 0 /* Erase freed memory. */
4237 memset(mem, 0, chunksize(p) - (SIZE_SZ+1));
4239 chunk_free(&main_arena, p);
4240 (void)mutex_unlock(&main_arena.mutex);
4245 realloc_check(Void_t* oldmem, size_t bytes, const Void_t *caller)
4248 realloc_check(Void_t* oldmem, size_t bytes)
4250 realloc_check(oldmem, bytes) Void_t* oldmem; size_t bytes;
4254 mchunkptr oldp, newp;
4255 INTERNAL_SIZE_T nb, oldsize;
4258 if (oldmem == 0) return malloc_check(bytes, NULL);
4260 if (oldmem == 0) return malloc_check(bytes);
4262 (void)mutex_lock(&main_arena.mutex);
4263   oldp = mem2chunk_check(oldmem);
4264   if(!oldp) {
4265     (void)mutex_unlock(&main_arena.mutex);
4266     switch(check_action) {
4267     case 1:
4268       fprintf(stderr, "realloc(): invalid pointer %lx!\n", (long)(oldmem));
4269       break;
4270     case 2:
4271       abort();
4272     }
4274 return malloc_check(bytes, NULL);
4276 return malloc_check(bytes);
4279 oldsize = chunksize(oldp);
4281 nb = request2size(bytes+1);
4283 #if HAVE_MMAP
4284   if (chunk_is_mmapped(oldp)) {
4285 #if HAVE_MREMAP
4286     newp = mremap_chunk(oldp, nb);
4287     if(!newp) {
4288 #endif
4289       /* Note the extra SIZE_SZ overhead. */
4290       if(oldsize - SIZE_SZ >= nb) newp = oldp; /* do nothing */
4291       else {
4292         /* Must alloc, copy, free. */
4293         newp = chunk_alloc(&main_arena, nb);
4294         if (newp) {
4295           MALLOC_COPY(chunk2mem(newp), oldmem, oldsize - 2*SIZE_SZ);
4296           munmap_chunk(oldp);
4297         }
4298       }
4299 #if HAVE_MREMAP
4300     }
4301 #endif
4302   } else {
4303 #endif /* HAVE_MMAP */
4304 newp = chunk_realloc(&main_arena, oldp, oldsize, nb);
4305 #if 0 /* Erase freed memory. */
4306 nb = chunksize(newp);
4307 if(oldp<newp || oldp>=chunk_at_offset(newp, nb)) {
4308 memset((char*)oldmem + 2*sizeof(mbinptr), 0,
4309 oldsize - (2*sizeof(mbinptr)+2*SIZE_SZ+1));
4310 } else if(nb > oldsize+SIZE_SZ) {
4311 memset((char*)chunk2mem(newp) + oldsize, 0, nb - (oldsize+SIZE_SZ));
4317 (void)mutex_unlock(&main_arena.mutex);
4319 if(!newp) return NULL;
4320 nb = chunksize(newp);
4321   if(chunk_is_mmapped(newp))
4322     --nb;
4323   else
4324     nb += SIZE_SZ - 1;
4325 *((unsigned char*)newp + nb) = MAGICBYTE(newp);
4326 return chunk2mem(newp);
4331 memalign_check(size_t alignment, size_t bytes, const Void_t *caller)
4334 memalign_check(size_t alignment, size_t bytes)
4336 memalign_check(alignment, bytes) size_t alignment; size_t bytes;