#endif
+#ifndef LACKS_UNISTD_H
+# include <unistd.h>
+#endif
+
/*
Define HAVE_MMAP to optionally make malloc() use mmap() to
allocate very large blocks. These will be returned to the
operating system immediately after a free().
*/
#ifndef HAVE_MMAP
-#define HAVE_MMAP 1
+# ifdef _POSIX_MAPPED_FILES
+# define HAVE_MMAP 1
+# endif
#endif
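
Note that the <unistd.h> include is moved above this point because _POSIX_MAPPED_FILES is defined by <unistd.h>; without including it first, the feature test here would silently fail. For context, a minimal sketch of what the mmap path buys for very large requests (assuming MAP_ANONYMOUS is available; older systems use the /dev/zero fallback shown in the MMAP macro further down):

    #include <sys/mman.h>

    static void *grab_large_block(size_t len)
    {
      /* One very large request becomes its own private mapping, so
         freeing it is a plain munmap() and the pages go straight
         back to the operating system. */
      void *p = mmap(0, len, PROT_READ|PROT_WRITE,
                     MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
      return (p == MAP_FAILED) ? 0 : p;
    }
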
/*
bsd/gnu getpagesize.h
*/
-#ifndef LACKS_UNISTD_H
-# include <unistd.h>
-#endif
-
#ifndef malloc_getpagesize
# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
# ifndef _SC_PAGE_SIZE
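
The #ifdef chain above resolves the page size at run time where the system allows it. A hedged sketch of the usual outcome (get_page_size is a hypothetical helper; the real code expands malloc_getpagesize inline):

    #include <unistd.h>

    /* Mirrors the malloc_getpagesize fallback chain: prefer
       sysconf(), fall back to a fixed default. */
    static size_t get_page_size(void)
    {
    #ifdef _SC_PAGESIZE
      long sz = sysconf(_SC_PAGESIZE);
      if (sz > 0)
        return (size_t)sz;
    #endif
      return 4096;   /* conservative default when sysconf cannot answer */
    }
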
initialization routine, then do the normal work. */
static Void_t*
-#ifdef _LIBC
-malloc_hook_ini(size_t sz, const __malloc_ptr_t caller)
-#else
#if __STD_C
-malloc_hook_ini(size_t sz)
+malloc_hook_ini(size_t sz, const __malloc_ptr_t caller)
#else
-malloc_hook_ini(sz) size_t sz;
-#endif
+malloc_hook_ini(sz, caller)
+ size_t sz; const __malloc_ptr_t caller;
#endif
{
__malloc_hook = NULL;
- __realloc_hook = NULL;
- __memalign_hook = NULL;
ptmalloc_init();
return mALLOc(sz);
}
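
With the _LIBC special case gone, every hook uniformly carries the caller argument. For reference, the user-visible side of this interface looks roughly like the following tracing hook, in the style of the glibc manual (my_malloc_hook and old_malloc_hook are illustrative names):

    #include <malloc.h>
    #include <stdio.h>

    static void *(*old_malloc_hook) (size_t, const void *);

    static void *my_malloc_hook (size_t size, const void *caller)
    {
      void *result;
      __malloc_hook = old_malloc_hook;   /* uninstall to avoid recursion */
      result = malloc (size);
      old_malloc_hook = __malloc_hook;   /* malloc may have changed it */
      fprintf (stderr, "malloc(%u) from %p returns %p\n",
               (unsigned) size, caller, result);
      __malloc_hook = my_malloc_hook;    /* reinstall */
      return result;
    }
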
{
__malloc_hook = NULL;
__realloc_hook = NULL;
- __memalign_hook = NULL;
ptmalloc_init();
return rEALLOc(ptr, sz);
}
size_t sz; size_t alignment; const __malloc_ptr_t caller;
#endif
{
- __malloc_hook = NULL;
- __realloc_hook = NULL;
__memalign_hook = NULL;
ptmalloc_init();
return mEMALIGn(sz, alignment);
= memalign_hook_ini;
void weak_variable (*__after_morecore_hook) __MALLOC_P ((void)) = NULL;
+/* Whether we are using malloc checking. */
+static int using_malloc_checking;
+
+/* A flag that is set by malloc_set_state, to signal that malloc checking
+ must not be enabled at the request of the user (via the MALLOC_CHECK_
+ environment variable). It is reset by __malloc_check_init to tell
+ malloc_set_state that the user has requested malloc checking.
+
+ The purpose of this flag is to make sure that malloc checking is not
+ enabled when the heap to be restored was constructed without malloc
+ checking, and thus does not contain the required magic bytes.
+ Otherwise the heap would be corrupted by calls to free and realloc. If
+ it turns out that the heap was created with malloc checking and the
+ user has requested it, malloc_set_state just calls __malloc_check_init
+ again to enable it. On the other hand, reusing such a heap without
+ further malloc checking is safe. */
+static int disallow_malloc_check;
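
The resulting handshake between malloc_set_state and __malloc_check_init, summarized in sequence (a comment-form sketch of the code further below):

    /*
      malloc_set_state:    disallow_malloc_check = 1;
                           ptmalloc_init();
      __malloc_check_init: sees disallow_malloc_check set, clears it
                           and returns without installing the hooks;
                           the cleared flag now records that the user
                           asked for checking via MALLOC_CHECK_.
      malloc_set_state:    if the saved heap used checking and the
                           user asked for it, call __malloc_check_init()
                           again, which this time really enables it.
    */
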
+
/* Activate a standard set of debugging hooks. */
void
__malloc_check_init()
{
+ if (disallow_malloc_check) {
+ disallow_malloc_check = 0;
+ return;
+ }
+ using_malloc_checking = 1;
__malloc_hook = malloc_check;
__free_hook = free_check;
__realloc_hook = realloc_check;
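
__malloc_check_init is normally reached from ptmalloc_init when the MALLOC_CHECK_ environment variable is set. Roughly, as a sketch of that initialization step (check_action and __malloc_check_init are this file's own internals):

    #include <stdlib.h>

    static void maybe_enable_checking(void)
    {
      const char *s = getenv("MALLOC_CHECK_");
      if (s != NULL) {
        check_action = atoi(s);  /* 0: silent, 1: print message, 2: abort */
        __malloc_check_init();
      }
    }
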
static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
-#define MMAP(size, prot, flags) ((dev_zero_fd < 0) ? \
+#define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
(dev_zero_fd = open("/dev/zero", O_RDWR), \
- mmap(0, (size), (prot), (flags), dev_zero_fd, 0)) : \
- mmap(0, (size), (prot), (flags), dev_zero_fd, 0))
+ mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
+ mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))
#else
-#define MMAP(size, prot, flags) \
- (mmap(0, (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
+#define MMAP(addr, size, prot, flags) \
+ (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
#endif
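
Both variants now pass the target address through to mmap. The /dev/zero branch exists for systems without MAP_ANONYMOUS; as a standalone sketch of the same technique (anon_map is a hypothetical helper):

    #include <sys/mman.h>
    #include <fcntl.h>

    static void *anon_map(void *addr, size_t size, int prot, int flags)
    {
    #ifdef MAP_ANONYMOUS
      return mmap(addr, size, prot, flags|MAP_ANONYMOUS, -1, 0);
    #else
      /* Mapping /dev/zero privately also yields zero-filled pages;
         the real macro caches the descriptor in dev_zero_fd. */
      int fd = open("/dev/zero", O_RDWR);
      if (fd < 0) return MAP_FAILED;
      return mmap(addr, size, prot, flags, fd, 0);
    #endif
    }
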
*/
size = (size + SIZE_SZ + page_mask) & ~page_mask;
- p = (mchunkptr)MMAP(size, PROT_READ|PROT_WRITE, MAP_PRIVATE);
+ p = (mchunkptr)MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE);
if(p == (mchunkptr) MAP_FAILED) return 0;
n_mmaps++;
No swap space needs to be reserved for the following large
mapping (on Linux, this is the case for all non-writable mappings
anyway). */
- p1 = (char *)MMAP(HEAP_MAX_SIZE<<1, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
+ p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
if(p1 == MAP_FAILED)
return 0;
p2 = (char *)(((unsigned long)p1 + HEAP_MAX_SIZE) & ~(HEAP_MAX_SIZE-1));
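
Reserving twice HEAP_MAX_SIZE guarantees that an aligned region of HEAP_MAX_SIZE fits inside the reservation no matter which address the kernel picks, and PROT_NONE plus MAP_NORESERVE means nothing is committed yet. The mask arithmetic, spelled out (assuming HEAP_MAX_SIZE is a power of two):

    /*  p1 .................................. p1 + 2*HEAP_MAX_SIZE
               p2 |<---- HEAP_MAX_SIZE ---->|

        p2 = (p1 + HEAP_MAX_SIZE) & ~(HEAP_MAX_SIZE-1) rounds p1 up to
        the next HEAP_MAX_SIZE boundary, and p2 + HEAP_MAX_SIZE never
        exceeds p1 + 2*HEAP_MAX_SIZE, so the aligned region always lies
        inside the reservation.  The slack before p2 and after
        p2 + HEAP_MAX_SIZE is unmapped again afterwards. */
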
new_size = (long)h->size + diff;
if(new_size < (long)sizeof(*h))
return -1;
- if(mprotect((char *)h + new_size, -diff, PROT_NONE) != 0)
+ /* Try to re-map the extra heap space freshly to save memory, and
+ make it inaccessible. */
+ if((char *)MMAP((char *)h + new_size, -diff, PROT_NONE,
+ MAP_PRIVATE|MAP_FIXED) == (char *) MAP_FAILED)
return -2;
}
h->size = new_size;
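
The switch from mprotect to an anonymous MAP_FIXED re-map is what actually returns memory: mprotect only changes permissions and leaves the pages charged to the process, while a fresh PROT_NONE mapping atomically replaces the old pages so the kernel can reclaim them, and the address range stays reserved for later re-growth.

    /* Sketch of the difference (tail and len are placeholders):

       mprotect(tail, len, PROT_NONE);
           -- permissions change only; the old pages stay charged.

       MMAP(tail, len, PROT_NONE, MAP_PRIVATE|MAP_FIXED);
           -- a fresh anonymous mapping replaces the old pages, so
              the kernel reclaims them, yet the range stays reserved. */
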
if(ar_ptr != &main_arena) {
heap_info *heap = heap_for_ptr(p);
assert(heap->ar_ptr == ar_ptr);
- assert((char *)p + sz <= (char *)heap + heap->size);
+ if(p != top(ar_ptr))
+ assert((char *)p + sz <= (char *)heap + heap->size);
+ else
+ assert((char *)p + sz == (char *)heap + heap->size);
return;
}
#endif
mallocs with other sbrk calls.
- All allocations are made from the the `lowest' part of any found
+ All allocations are made from the `lowest' part of any found
chunk. (The implementation invariant is that prev_inuse is
always true of any allocated chunk; i.e., that each allocated
chunk borders either a previously allocated and still in-use chunk,
functions. */
#define MALLOC_STATE_MAGIC 0x444c4541l
-#define MALLOC_STATE_VERSION (0*0x100l + 0l) /* major*0x100 + minor */
+#define MALLOC_STATE_VERSION (0*0x100l + 1l) /* major*0x100 + minor */
struct malloc_state {
long magic;
unsigned int max_n_mmaps;
unsigned long mmapped_mem;
unsigned long max_mmapped_mem;
+ int using_malloc_checking;
};
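
The minor version bump (0.0 to 0.1) signals a backward-compatible extension: states saved by older code lack the new field but can still be restored. Decoding, per the major*0x100 + minor convention (hypothetical helper macros):

    #define MALLOC_STATE_MAJOR(v) ((v) >> 8)    /* incompatible changes */
    #define MALLOC_STATE_MINOR(v) ((v) & 0xff)  /* compatible additions */

    /* malloc_set_state must fail when MALLOC_STATE_MAJOR(ms->version)
       exceeds its own major number; a larger minor number is tolerated. */
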
Void_t*
mALLOC_GET_STATe()
{
- mchunkptr victim;
struct malloc_state* ms;
int i;
mbinptr b;
- ptmalloc_init();
- (void)mutex_lock(&main_arena.mutex);
- victim = chunk_alloc(&main_arena, request2size(sizeof(*ms)));
- if(!victim) {
- (void)mutex_unlock(&main_arena.mutex);
+ ms = (struct malloc_state*)mALLOc(sizeof(*ms));
+ if (!ms)
return 0;
- }
- ms = (struct malloc_state*)chunk2mem(victim);
+ (void)mutex_lock(&main_arena.mutex);
ms->magic = MALLOC_STATE_MAGIC;
ms->version = MALLOC_STATE_VERSION;
ms->av[0] = main_arena.av[0];
ms->max_n_mmaps = max_n_mmaps;
ms->mmapped_mem = mmapped_mem;
ms->max_mmapped_mem = max_mmapped_mem;
+#if defined _LIBC || defined MALLOC_HOOKS
+ ms->using_malloc_checking = using_malloc_checking;
+#else
+ ms->using_malloc_checking = 0;
+#endif
(void)mutex_unlock(&main_arena.mutex);
return (Void_t*)ms;
}
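
A round-trip sketch of the public interface built on these internals (the typical consumer is Emacs's unexec dumping; error handling abbreviated):

    #include <malloc.h>
    #include <stdlib.h>

    static int save_and_restore(void)
    {
      void *state = malloc_get_state();   /* snapshot of malloc's globals */
      if (state == NULL)
        return -1;
      /* ... a dumper would write the heap image and `state' out here,
         and the restarted process would read them back in ... */
      if (malloc_set_state(state) != 0)   /* unknown magic or major version */
        return -1;
      free(state);                        /* the block itself is ordinary heap */
      return 0;
    }
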
int i;
mbinptr b;
+#if defined _LIBC || defined MALLOC_HOOKS
+ disallow_malloc_check = 1;
+#endif
ptmalloc_init();
if(ms->magic != MALLOC_STATE_MAGIC) return -1;
/* Must fail if the major version is too high. */
mmapped_mem = ms->mmapped_mem;
max_mmapped_mem = ms->max_mmapped_mem;
/* add version-dependent code here */
+  if (ms->version >= 1) {
+#if defined _LIBC || defined MALLOC_HOOKS
+ /* Check whether it is safe to enable malloc checking. */
+ if (ms->using_malloc_checking && !using_malloc_checking &&
+ !disallow_malloc_check)
+ __malloc_check_init ();
+#endif
+ }
+
(void)mutex_unlock(&main_arena.mutex);
return 0;
}
(void)mutex_unlock(&main_arena.mutex);
switch(check_action) {
case 1:
- fprintf(stderr, "free(): invalid pointer %lx!\n", (long)(mem));
+ fprintf(stderr, "free(): invalid pointer %p!\n", mem);
break;
case 2:
abort();
(void)mutex_unlock(&main_arena.mutex);
switch(check_action) {
case 1:
- fprintf(stderr, "realloc(): invalid pointer %lx!\n", (long)(oldmem));
+ fprintf(stderr, "realloc(): invalid pointer %p!\n", oldmem);
break;
case 2:
abort();
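
For completeness, what trips these diagnostics: passing free or realloc a pointer the allocator never returned. With MALLOC_CHECK_=1 (check_action 1) the library prints the message above and continues; with MALLOC_CHECK_=2 it aborts. A contrived trigger:

    #include <stdlib.h>

    int main(void)
    {
      char *p = malloc(16);
      free(p + 1);   /* interior pointer: free_check rejects it
                        instead of corrupting the heap */
      free(p);
      return 0;
    }
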