path: root/libc/stdlib/malloc-standard
author    Eric Andersen <andersen@codepoet.org>  2003-12-30 10:40:49 +0000
committer Eric Andersen <andersen@codepoet.org>  2003-12-30 10:40:49 +0000
commit    8d532c51318bad2436880ecac972c9dfa3996c9b (patch)
tree      821863358734242feb99643e9d66ee9b175ad464
parent    4c9086ee4afde4257a4b4a8f55e05932d1b6acfd (diff)
Rework malloc. The new default implementation is based on dlmalloc from Doug
Lea. It is about 2x faster than the old malloc-930716 and behaves much better -- it properly releases memory back to the system, and it uses a combination of brk() for small allocations and mmap() for larger allocations. -Erik
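
The small/large split described above can be pictured with a minimal sketch (illustrative only: in the allocator itself the cutoff is av->mmap_threshold and the brk path runs through the chunk machinery in __malloc_alloc below; grab_memory is a hypothetical name):

    #include <stdint.h>
    #include <unistd.h>
    #include <sys/mman.h>

    /* Hypothetical helper mirroring the strategy: whole pages from mmap()
       for big requests (individually returnable via munmap()), brk growth
       via sbrk() for everything else. */
    static void *grab_memory(size_t size, size_t mmap_threshold)
    {
        if (size >= mmap_threshold) {
            void *mem = mmap(0, size, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            return mem == MAP_FAILED ? NULL : mem;
        }
        void *mem = sbrk((intptr_t)size);
        return mem == (void *)-1 ? NULL : mem;
    }
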
Diffstat (limited to 'libc/stdlib/malloc-standard')
-rw-r--r--  libc/stdlib/malloc-standard/Makefile   |   51
-rw-r--r--  libc/stdlib/malloc-standard/calloc.c   |   93
-rw-r--r--  libc/stdlib/malloc-standard/free.c     |  382
-rw-r--r--  libc/stdlib/malloc-standard/mallinfo.c |   81
-rw-r--r--  libc/stdlib/malloc-standard/malloc.c   | 1160
-rw-r--r--  libc/stdlib/malloc-standard/malloc.h   |  953
-rw-r--r--  libc/stdlib/malloc-standard/mallopt.c  |   64
-rw-r--r--  libc/stdlib/malloc-standard/memalign.c |  126
-rw-r--r--  libc/stdlib/malloc-standard/realloc.c  |  237
9 files changed, 3147 insertions(+), 0 deletions(-)
diff --git a/libc/stdlib/malloc-standard/Makefile b/libc/stdlib/malloc-standard/Makefile
new file mode 100644
index 000000000..2b87c5de2
--- /dev/null
+++ b/libc/stdlib/malloc-standard/Makefile
@@ -0,0 +1,51 @@
+# Makefile for uClibc
+#
+# Copyright (C) 2000 by Lineo, inc.
+# Copyright (C) 2000,2001 Erik Andersen <andersen@uclibc.org>
+#
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU Library General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option) any
+# later version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Library General Public License
+# along with this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# Derived in part from the Linux-8086 C library, the GNU C Library, and several
+# other sundry sources. Files within this library are copyright by their
+# respective copyright holders.
+
+TOPDIR=../../../
+include $(TOPDIR)Rules.mak
+
+# Turn on malloc debugging if requested
+ifeq ($(UCLIBC_MALLOC_DEBUGGING),y)
+CFLAGS += -D__MALLOC_DEBUGGING
+endif
+
+# calloc.c is built here as part of CSRC below
+# valloc.c can be found at uClibc/libc/stdlib/valloc.c
+CSRC=malloc.c calloc.c realloc.c free.c memalign.c mallopt.c mallinfo.c
+COBJS=$(patsubst %.c,%.o, $(CSRC))
+OBJS=$(COBJS)
+
+all: $(OBJS) $(LIBC)
+
+$(LIBC): ar-target
+
+ar-target: $(OBJS)
+ $(AR) $(ARFLAGS) $(LIBC) $(OBJS)
+
+$(COBJS): %.o : %.c
+ $(CC) $(CFLAGS) -c $< -o $@
+ $(STRIPTOOL) -x -R .note -R .comment $*.o
+
+clean:
+ $(RM) *.[oa] *~ core
+
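
With UCLIBC_MALLOC_DEBUGGING=y, every object in this directory is built with -D__MALLOC_DEBUGGING, which turns the check_*() calls in the sources into the assertion-heavy __do_check_*() routines defined in malloc.c further below. A sketch of the presumed mapping in malloc.h (that header is not shown in this commit excerpt, so the exact macro layout is an assumption):

    /* Presumed pattern from malloc.h: checks compile away in normal builds. */
    #ifdef __MALLOC_DEBUGGING
    # define check_chunk(p)        __do_check_chunk(p)
    # define check_inuse_chunk(p)  __do_check_inuse_chunk(p)
    # define check_malloc_state()  __do_check_malloc_state()
    #else
    # define check_chunk(p)        do {} while (0)
    # define check_inuse_chunk(p)  do {} while (0)
    # define check_malloc_state()  do {} while (0)
    #endif
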
diff --git a/libc/stdlib/malloc-standard/calloc.c b/libc/stdlib/malloc-standard/calloc.c
new file mode 100644
index 000000000..cff0adab8
--- /dev/null
+++ b/libc/stdlib/malloc-standard/calloc.c
@@ -0,0 +1,93 @@
+/*
+ This is a version (aka dlmalloc) of malloc/free/realloc written by
+ Doug Lea and released to the public domain. Use, modify, and
+ redistribute this code without permission or acknowledgement in any
+ way you wish. Send questions, comments, complaints, performance
+ data, etc to dl@cs.oswego.edu
+
+ VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
+
+ Note: There may be an updated version of this malloc obtainable at
+ ftp://gee.cs.oswego.edu/pub/misc/malloc.c
+ Check before installing!
+
+ Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
+*/
+
+#include "malloc.h"
+
+
+/* ------------------------------ calloc ------------------------------ */
+void* calloc(size_t n_elements, size_t elem_size)
+{
+ mchunkptr p;
+ unsigned long clearsize;
+ unsigned long nclears;
+ size_t size, *d;
+ void* mem;
+
+
+ /* guard vs integer overflow, but allow n_elements
+ * to fall through and call malloc(0) */
+ size = n_elements * elem_size;
+ if (n_elements && elem_size != (size / n_elements)) {
+ __set_errno(ENOMEM);
+ return NULL;
+ }
+
+ LOCK;
+ mem = malloc(size);
+ if (mem != 0) {
+ p = mem2chunk(mem);
+
+ if (!chunk_is_mmapped(p))
+ {
+ /*
+ Unroll clear of <= 36 bytes (72 if 8byte sizes)
+ We know that contents have an odd number of
+ size_t-sized words; minimally 3.
+ */
+
+ d = (size_t*)mem;
+ clearsize = chunksize(p) - (sizeof(size_t));
+ nclears = clearsize / sizeof(size_t);
+ assert(nclears >= 3);
+
+ if (nclears > 9)
+ memset(d, 0, clearsize);
+
+ else {
+ *(d+0) = 0;
+ *(d+1) = 0;
+ *(d+2) = 0;
+ if (nclears > 4) {
+ *(d+3) = 0;
+ *(d+4) = 0;
+ if (nclears > 6) {
+ *(d+5) = 0;
+ *(d+6) = 0;
+ if (nclears > 8) {
+ *(d+7) = 0;
+ *(d+8) = 0;
+ }
+ }
+ }
+ }
+ }
+#if 0
+ else
+ {
+ /* Standard unix mmap using /dev/zero clears memory so calloc
+ * doesn't need to actually zero anything....
+ */
+ d = (size_t*)mem;
+ /* Note the additional (sizeof(size_t)) */
+ clearsize = chunksize(p) - 2*(sizeof(size_t));
+ memset(d, 0, clearsize);
+ }
+#endif
+ }
+ UNLOCK;
+ return mem;
+}
+
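
The multiplication guard at the top of calloc() is the standard defense against size_t wrap-around: if n_elements * elem_size overflows, dividing the truncated product back by n_elements no longer yields elem_size, so the request is refused with ENOMEM. A small standalone check (hypothetical test program, not part of the library):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        /* ((size_t)-1 / 2 + 2) * 2 wraps around zero, so calloc must
           refuse the request rather than return an undersized block. */
        void *p = calloc((size_t)-1 / 2 + 2, 2);
        if (p == NULL && errno == ENOMEM)
            printf("overflowing calloc request correctly rejected\n");
        free(p); /* free(NULL) is a no-op */
        return 0;
    }
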
diff --git a/libc/stdlib/malloc-standard/free.c b/libc/stdlib/malloc-standard/free.c
new file mode 100644
index 000000000..4277767fa
--- /dev/null
+++ b/libc/stdlib/malloc-standard/free.c
@@ -0,0 +1,382 @@
+/*
+ This is a version (aka dlmalloc) of malloc/free/realloc written by
+ Doug Lea and released to the public domain. Use, modify, and
+ redistribute this code without permission or acknowledgement in any
+ way you wish. Send questions, comments, complaints, performance
+ data, etc to dl@cs.oswego.edu
+
+ VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
+
+ Note: There may be an updated version of this malloc obtainable at
+ ftp://gee.cs.oswego.edu/pub/misc/malloc.c
+ Check before installing!
+
+ Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
+*/
+
+#include "malloc.h"
+
+
+/* ------------------------- __malloc_trim -------------------------
+ __malloc_trim is an inverse of sorts to __malloc_alloc. It gives memory
+ back to the system (via negative arguments to sbrk) if there is unused
+ memory at the `high' end of the malloc pool. It is called automatically by
+ free() when top space exceeds the trim threshold. It is also called by the
+ public malloc_trim routine. It returns 1 if it actually released any
+ memory, else 0.
+*/
+static int __malloc_trim(size_t pad, mstate av)
+{
+ long top_size; /* Amount of top-most memory */
+ long extra; /* Amount to release */
+ long released; /* Amount actually released */
+ char* current_brk; /* address returned by pre-check sbrk call */
+ char* new_brk; /* address returned by post-check sbrk call */
+ size_t pagesz;
+
+ pagesz = av->pagesize;
+ top_size = chunksize(av->top);
+
+ /* Release in pagesize units, keeping at least one page */
+ extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
+
+ if (extra > 0) {
+
+ /*
+ Only proceed if end of memory is where we last set it.
+ This avoids problems if there were foreign sbrk calls.
+ */
+ current_brk = (char*)(MORECORE(0));
+ if (current_brk == (char*)(av->top) + top_size) {
+
+ /*
+ Attempt to release memory. We ignore MORECORE return value,
+ and instead call again to find out where new end of memory is.
+ This avoids problems if first call releases less than we asked,
+ of if failure somehow altered brk value. (We could still
+ encounter problems if it altered brk in some very bad way,
+ but the only thing we can do is adjust anyway, which will cause
+ some downstream failure.)
+ */
+
+ MORECORE(-extra);
+ new_brk = (char*)(MORECORE(0));
+
+ if (new_brk != (char*)MORECORE_FAILURE) {
+ released = (long)(current_brk - new_brk);
+
+ if (released != 0) {
+ /* Success. Adjust top. */
+ av->sbrked_mem -= released;
+ set_head(av->top, (top_size - released) | PREV_INUSE);
+ check_malloc_state();
+ return 1;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ Initialize a malloc_state struct.
+
+ This is called only from within __malloc_consolidate, which needs to
+ be called in the same contexts anyway. It is never called directly
+ outside of __malloc_consolidate because some optimizing compilers try
+ to inline it at all call points, which turns out not to be an
+ optimization at all. (Inlining it in __malloc_consolidate is fine though.)
+*/
+static void malloc_init_state(mstate av)
+{
+ int i;
+ mbinptr bin;
+
+ /* Establish circular links for normal bins */
+ for (i = 1; i < NBINS; ++i) {
+ bin = bin_at(av,i);
+ bin->fd = bin->bk = bin;
+ }
+
+ av->top_pad = DEFAULT_TOP_PAD;
+ av->n_mmaps_max = DEFAULT_MMAP_MAX;
+ av->mmap_threshold = DEFAULT_MMAP_THRESHOLD;
+ av->trim_threshold = DEFAULT_TRIM_THRESHOLD;
+
+#if MORECORE_CONTIGUOUS
+ set_contiguous(av);
+#else
+ set_noncontiguous(av);
+#endif
+
+
+ set_max_fast(av, DEFAULT_MXFAST);
+
+ av->top = initial_top(av);
+ av->pagesize = malloc_getpagesize;
+}
+
+
+/* ----------------------------------------------------------------------
+ *
+ * PUBLIC STUFF
+ *
+ * ----------------------------------------------------------------------*/
+
+
+/* ------------------------- __malloc_consolidate -------------------------
+
+ __malloc_consolidate is a specialized version of free() that tears
+ down chunks held in fastbins. Free itself cannot be used for this
+ purpose since, among other things, it might place chunks back onto
+ fastbins. So, instead, we need to use a minor variant of the same
+ code.
+
+ Also, because this routine needs to be called the first time through
+ malloc anyway, it turns out to be the perfect place to trigger
+ initialization code.
+*/
+void __malloc_consolidate(mstate av)
+{
+ mfastbinptr* fb; /* current fastbin being consolidated */
+ mfastbinptr* maxfb; /* last fastbin (for loop control) */
+ mchunkptr p; /* current chunk being consolidated */
+ mchunkptr nextp; /* next chunk to consolidate */
+ mchunkptr unsorted_bin; /* bin header */
+ mchunkptr first_unsorted; /* chunk to link to */
+
+ /* These have same use as in free() */
+ mchunkptr nextchunk;
+ size_t size;
+ size_t nextsize;
+ size_t prevsize;
+ int nextinuse;
+ mchunkptr bck;
+ mchunkptr fwd;
+
+ /*
+ If max_fast is 0, we know that av hasn't
+ yet been initialized, in which case do so below
+ */
+
+ if (av->max_fast != 0) {
+ clear_fastchunks(av);
+
+ unsorted_bin = unsorted_chunks(av);
+
+ /*
+ Remove each chunk from fast bin and consolidate it, placing it
+ then in unsorted bin. Among other reasons for doing this,
+ placing in unsorted bin avoids needing to calculate actual bins
+ until malloc is sure that chunks aren't immediately going to be
+ reused anyway.
+ */
+
+ maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
+ fb = &(av->fastbins[0]);
+ do {
+ if ( (p = *fb) != 0) {
+ *fb = 0;
+
+ do {
+ check_inuse_chunk(p);
+ nextp = p->fd;
+
+ /* Slightly streamlined version of consolidation code in free() */
+ size = p->size & ~PREV_INUSE;
+ nextchunk = chunk_at_offset(p, size);
+ nextsize = chunksize(nextchunk);
+
+ if (!prev_inuse(p)) {
+ prevsize = p->prev_size;
+ size += prevsize;
+ p = chunk_at_offset(p, -((long) prevsize));
+ unlink(p, bck, fwd);
+ }
+
+ if (nextchunk != av->top) {
+ nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
+ set_head(nextchunk, nextsize);
+
+ if (!nextinuse) {
+ size += nextsize;
+ unlink(nextchunk, bck, fwd);
+ }
+
+ first_unsorted = unsorted_bin->fd;
+ unsorted_bin->fd = p;
+ first_unsorted->bk = p;
+
+ set_head(p, size | PREV_INUSE);
+ p->bk = unsorted_bin;
+ p->fd = first_unsorted;
+ set_foot(p, size);
+ }
+
+ else {
+ size += nextsize;
+ set_head(p, size | PREV_INUSE);
+ av->top = p;
+ }
+
+ } while ( (p = nextp) != 0);
+
+ }
+ } while (fb++ != maxfb);
+ }
+ else {
+ malloc_init_state(av);
+ check_malloc_state();
+ }
+}
+
+
+/* ------------------------------ free ------------------------------ */
+void free(void* mem)
+{
+ mstate av;
+
+ mchunkptr p; /* chunk corresponding to mem */
+ size_t size; /* its size */
+ mfastbinptr* fb; /* associated fastbin */
+ mchunkptr nextchunk; /* next contiguous chunk */
+ size_t nextsize; /* its size */
+ int nextinuse; /* true if nextchunk is used */
+ size_t prevsize; /* size of previous contiguous chunk */
+ mchunkptr bck; /* misc temp for linking */
+ mchunkptr fwd; /* misc temp for linking */
+
+ /* free(0) has no effect */
+ if (mem == NULL)
+ return;
+
+ LOCK;
+ av = get_malloc_state();
+ p = mem2chunk(mem);
+ size = chunksize(p);
+
+ check_inuse_chunk(p);
+
+ /*
+ If eligible, place chunk on a fastbin so it can be found
+ and used quickly in malloc.
+ */
+
+ if ((unsigned long)(size) <= (unsigned long)(av->max_fast)
+
+#if TRIM_FASTBINS
+ /* If TRIM_FASTBINS set, don't place chunks
+ bordering top into fastbins */
+ && (chunk_at_offset(p, size) != av->top)
+#endif
+ ) {
+
+ set_fastchunks(av);
+ fb = &(av->fastbins[fastbin_index(size)]);
+ p->fd = *fb;
+ *fb = p;
+ }
+
+ /*
+ Consolidate other non-mmapped chunks as they arrive.
+ */
+
+ else if (!chunk_is_mmapped(p)) {
+ set_anychunks(av);
+
+ nextchunk = chunk_at_offset(p, size);
+ nextsize = chunksize(nextchunk);
+
+ /* consolidate backward */
+ if (!prev_inuse(p)) {
+ prevsize = p->prev_size;
+ size += prevsize;
+ p = chunk_at_offset(p, -((long) prevsize));
+ unlink(p, bck, fwd);
+ }
+
+ if (nextchunk != av->top) {
+ /* get and clear inuse bit */
+ nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
+ set_head(nextchunk, nextsize);
+
+ /* consolidate forward */
+ if (!nextinuse) {
+ unlink(nextchunk, bck, fwd);
+ size += nextsize;
+ }
+
+ /*
+ Place the chunk in unsorted chunk list. Chunks are
+ not placed into regular bins until after they have
+ been given one chance to be used in malloc.
+ */
+
+ bck = unsorted_chunks(av);
+ fwd = bck->fd;
+ p->bk = bck;
+ p->fd = fwd;
+ bck->fd = p;
+ fwd->bk = p;
+
+ set_head(p, size | PREV_INUSE);
+ set_foot(p, size);
+
+ check_free_chunk(p);
+ }
+
+ /*
+ If the chunk borders the current high end of memory,
+ consolidate into top
+ */
+
+ else {
+ size += nextsize;
+ set_head(p, size | PREV_INUSE);
+ av->top = p;
+ check_chunk(p);
+ }
+
+ /*
+ If freeing a large space, consolidate possibly-surrounding
+ chunks. Then, if the total unused topmost memory exceeds trim
+ threshold, ask malloc_trim to reduce top.
+
+ Unless max_fast is 0, we don't know if there are fastbins
+ bordering top, so we cannot tell for sure whether threshold
+ has been reached unless fastbins are consolidated. But we
+ don't want to consolidate on each free. As a compromise,
+ consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
+ is reached.
+ */
+
+ if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
+ if (have_fastchunks(av))
+ __malloc_consolidate(av);
+
+ if ((unsigned long)(chunksize(av->top)) >=
+ (unsigned long)(av->trim_threshold))
+ __malloc_trim(av->top_pad, av);
+ }
+
+ }
+ /*
+ If the chunk was allocated via mmap, release via munmap()
+ Note that if HAVE_MMAP is false but chunk_is_mmapped is
+ true, then user must have overwritten memory. There's nothing
+ we can do to catch this error unless DEBUG is set, in which case
+ check_inuse_chunk (above) will have triggered error.
+ */
+
+ else {
+ int ret;
+ size_t offset = p->prev_size;
+ av->n_mmaps--;
+ av->mmapped_mem -= (size + offset);
+ ret = munmap((char*)p - offset, size + offset);
+ /* munmap returns non-zero on failure */
+ assert(ret == 0);
+ }
+ UNLOCK;
+}
+
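
Taken together, the paths above mean that a small free just pushes the chunk onto a fastbin, while a large free consolidates its neighbors and, once the top chunk exceeds trim_threshold, hands pages back to the kernel through __malloc_trim's negative sbrk. A rough way to observe this from application code (whether the break actually shrinks depends on the trim, top-pad, and mmap thresholds in effect, so this is a demonstration, not a guarantee):

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
        void *before = sbrk(0);
        void *p = malloc(64 * 1024);  /* below the mmap threshold: served
                                         from the brk-managed heap */
        void *grown = sbrk(0);
        free(p);                      /* consolidates into top; may trigger
                                         __malloc_trim if top is big enough */
        void *after = sbrk(0);
        printf("brk: %p -> %p -> %p\n", before, grown, after);
        return 0;
    }
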
diff --git a/libc/stdlib/malloc-standard/mallinfo.c b/libc/stdlib/malloc-standard/mallinfo.c
new file mode 100644
index 000000000..89d6b68e1
--- /dev/null
+++ b/libc/stdlib/malloc-standard/mallinfo.c
@@ -0,0 +1,81 @@
+/*
+ This is a version (aka dlmalloc) of malloc/free/realloc written by
+ Doug Lea and released to the public domain. Use, modify, and
+ redistribute this code without permission or acknowledgement in any
+ way you wish. Send questions, comments, complaints, performance
+ data, etc to dl@cs.oswego.edu
+
+ VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
+
+ Note: There may be an updated version of this malloc obtainable at
+ ftp://gee.cs.oswego.edu/pub/misc/malloc.c
+ Check before installing!
+
+ Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
+*/
+
+#include "malloc.h"
+
+
+/* ------------------------------ mallinfo ------------------------------ */
+struct mallinfo mallinfo(void)
+{
+ mstate av;
+ struct mallinfo mi;
+ int i;
+ mbinptr b;
+ mchunkptr p;
+ size_t avail;
+ size_t fastavail;
+ int nblocks;
+ int nfastblocks;
+
+ LOCK;
+ av = get_malloc_state();
+ /* Ensure initialization */
+ if (av->top == 0) {
+ __malloc_consolidate(av);
+ }
+
+ check_malloc_state();
+
+ /* Account for top */
+ avail = chunksize(av->top);
+ nblocks = 1; /* top always exists */
+
+ /* traverse fastbins */
+ nfastblocks = 0;
+ fastavail = 0;
+
+ for (i = 0; i < NFASTBINS; ++i) {
+ for (p = av->fastbins[i]; p != 0; p = p->fd) {
+ ++nfastblocks;
+ fastavail += chunksize(p);
+ }
+ }
+
+ avail += fastavail;
+
+ /* traverse regular bins */
+ for (i = 1; i < NBINS; ++i) {
+ b = bin_at(av, i);
+ for (p = last(b); p != b; p = p->bk) {
+ ++nblocks;
+ avail += chunksize(p);
+ }
+ }
+
+ mi.smblks = nfastblocks;
+ mi.ordblks = nblocks;
+ mi.fordblks = avail;
+ mi.uordblks = av->sbrked_mem - avail;
+ mi.arena = av->sbrked_mem;
+ mi.hblks = av->n_mmaps;
+ mi.hblkhd = av->mmapped_mem;
+ mi.fsmblks = fastavail;
+ mi.keepcost = chunksize(av->top);
+ mi.usmblks = av->max_total_mem;
+ UNLOCK;
+ return mi;
+}
+
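
The fields filled in above follow directly from the assignments: arena is total sbrk'd memory, hblks/hblkhd count mmap'd regions and bytes, fordblks is available free space, and keepcost is the trimmable top chunk. A minimal consumer (assuming the traditional struct mallinfo declaration in malloc.h with int fields):

    #include <malloc.h>
    #include <stdio.h>

    int main(void)
    {
        void *p = malloc(1000);
        struct mallinfo mi = mallinfo();
        printf("arena (sbrked bytes)  : %d\n", mi.arena);
        printf("free blocks / bytes   : %d / %d\n", mi.ordblks, mi.fordblks);
        printf("mmapped maps / bytes  : %d / %d\n", mi.hblks, mi.hblkhd);
        printf("trimmable top (bytes) : %d\n", mi.keepcost);
        free(p);
        return 0;
    }
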
diff --git a/libc/stdlib/malloc-standard/malloc.c b/libc/stdlib/malloc-standard/malloc.c
new file mode 100644
index 000000000..8d132a43e
--- /dev/null
+++ b/libc/stdlib/malloc-standard/malloc.c
@@ -0,0 +1,1160 @@
+/*
+ This is a version (aka dlmalloc) of malloc/free/realloc written by
+ Doug Lea and released to the public domain. Use, modify, and
+ redistribute this code without permission or acknowledgement in any
+ way you wish. Send questions, comments, complaints, performance
+ data, etc to dl@cs.oswego.edu
+
+ VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
+
+ Note: There may be an updated version of this malloc obtainable at
+ ftp://gee.cs.oswego.edu/pub/misc/malloc.c
+ Check before installing!
+
+ Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
+*/
+
+#define _GNU_SOURCE
+#include "malloc.h"
+
+
+#ifdef __UCLIBC_HAS_THREADS__
+pthread_mutex_t __malloc_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
+#endif
+
+/*
+ There is exactly one instance of this struct in this malloc.
+ If you are adapting this malloc in a way that does NOT use a static
+ malloc_state, you MUST explicitly zero-fill it before using. This
+ malloc relies on the property that malloc_state is initialized to
+ all zeroes (as is true of C statics).
+*/
+struct malloc_state __malloc_state; /* never directly referenced */
+
+/* forward declaration */
+static int __malloc_largebin_index(unsigned int sz);
+
+#ifdef __MALLOC_DEBUGGING
+
+/*
+ Debugging support
+
+ Because freed chunks may be overwritten with bookkeeping fields, this
+ malloc will often die when freed memory is overwritten by user
+ programs. This can be very effective (albeit in an annoying way)
+ in helping track down dangling pointers.
+
+ If you compile with -D__MALLOC_DEBUGGING, a number of assertion checks are
+ enabled that will catch more memory errors. You probably won't be
+ able to make much sense of the actual assertion errors, but they
+ should help you locate incorrectly overwritten memory. The
+ checking is fairly extensive, and will slow down execution
+ noticeably. Calling malloc_stats or mallinfo with __MALLOC_DEBUGGING set will
+ attempt to check every non-mmapped allocated and free chunk in the
+ course of computing the summaries. (By nature, mmapped regions
+ cannot be checked very much automatically.)
+
+ Setting __MALLOC_DEBUGGING may also be helpful if you are trying to modify
+ this code. The assertions in the check routines spell out in more
+ detail the assumptions and invariants underlying the algorithms.
+
+ Setting __MALLOC_DEBUGGING does NOT provide an automated mechanism for checking
+ that all accesses to malloced memory stay within their
+ bounds. However, there are several add-ons and adaptations of this
+ or other mallocs available that do this.
+*/
+
+/* Properties of all chunks */
+void __do_check_chunk(mchunkptr p)
+{
+ mstate av = get_malloc_state();
+#ifdef __DOASSERTS__
+ /* min and max possible addresses assuming contiguous allocation */
+ char* max_address = (char*)(av->top) + chunksize(av->top);
+ char* min_address = max_address - av->sbrked_mem;
+ unsigned long sz = chunksize(p);
+#endif
+
+ if (!chunk_is_mmapped(p)) {
+
+ /* Has legal address ... */
+ if (p != av->top) {
+ if (contiguous(av)) {
+ assert(((char*)p) >= min_address);
+ assert(((char*)p + sz) <= ((char*)(av->top)));
+ }
+ }
+ else {
+ /* top size is always at least MINSIZE */
+ assert((unsigned long)(sz) >= MINSIZE);
+ /* top predecessor always marked inuse */
+ assert(prev_inuse(p));
+ }
+
+ }
+ else {
+ /* address is outside main heap */
+ if (contiguous(av) && av->top != initial_top(av)) {
+ assert(((char*)p) < min_address || ((char*)p) > max_address);
+ }
+ /* chunk is page-aligned */
+ assert(((p->prev_size + sz) & (av->pagesize-1)) == 0);
+ /* mem is aligned */
+ assert(aligned_OK(chunk2mem(p)));
+ }
+}
+
+/* Properties of free chunks */
+void __do_check_free_chunk(mchunkptr p)
+{
+ size_t sz = p->size & ~PREV_INUSE;
+#ifdef __DOASSERTS__
+ mstate av = get_malloc_state();
+ mchunkptr next = chunk_at_offset(p, sz);
+#endif
+
+ __do_check_chunk(p);
+
+ /* Chunk must claim to be free ... */
+ assert(!inuse(p));
+ assert (!chunk_is_mmapped(p));
+
+ /* Unless a special marker, must have OK fields */
+ if ((unsigned long)(sz) >= MINSIZE)
+ {
+ assert((sz & MALLOC_ALIGN_MASK) == 0);
+ assert(aligned_OK(chunk2mem(p)));
+ /* ... matching footer field */
+ assert(next->prev_size == sz);
+ /* ... and is fully consolidated */
+ assert(prev_inuse(p));
+ assert (next == av->top || inuse(next));
+
+ /* ... and has minimally sane links */
+ assert(p->fd->bk == p);
+ assert(p->bk->fd == p);
+ }
+ else /* markers are always of size (sizeof(size_t)) */
+ assert(sz == (sizeof(size_t)));
+}
+
+/* Properties of inuse chunks */
+void __do_check_inuse_chunk(mchunkptr p)
+{
+ mstate av = get_malloc_state();
+ mchunkptr next;
+ __do_check_chunk(p);
+
+ if (chunk_is_mmapped(p))
+ return; /* mmapped chunks have no next/prev */
+
+ /* Check whether it claims to be in use ... */
+ assert(inuse(p));
+
+ next = next_chunk(p);
+
+ /* ... and is surrounded by OK chunks.
+ Since more things can be checked with free chunks than inuse ones,
+ if an inuse chunk borders them and debug is on, it's worth doing them.
+ */
+ if (!prev_inuse(p)) {
+ /* Note that we cannot even look at prev unless it is not inuse */
+ mchunkptr prv = prev_chunk(p);
+ assert(next_chunk(prv) == p);
+ __do_check_free_chunk(prv);
+ }
+
+ if (next == av->top) {
+ assert(prev_inuse(next));
+ assert(chunksize(next) >= MINSIZE);
+ }
+ else if (!inuse(next))
+ __do_check_free_chunk(next);
+}
+
+/* Properties of chunks recycled from fastbins */
+void __do_check_remalloced_chunk(mchunkptr p, size_t s)
+{
+#ifdef __DOASSERTS__
+ size_t sz = p->size & ~PREV_INUSE;
+#endif
+
+ __do_check_inuse_chunk(p);
+
+ /* Legal size ... */
+ assert((sz & MALLOC_ALIGN_MASK) == 0);
+ assert((unsigned long)(sz) >= MINSIZE);
+ /* ... and alignment */
+ assert(aligned_OK(chunk2mem(p)));
+ /* chunk is less than MINSIZE more than request */
+ assert((long)(sz) - (long)(s) >= 0);
+ assert((long)(sz) - (long)(s + MINSIZE) < 0);
+}
+
+/* Properties of nonrecycled chunks at the point they are malloced */
+void __do_check_malloced_chunk(mchunkptr p, size_t s)
+{
+ /* same as recycled case ... */
+ __do_check_remalloced_chunk(p, s);
+
+ /*
+ ... plus, must obey implementation invariant that prev_inuse is
+ always true of any allocated chunk; i.e., that each allocated
+ chunk borders either a previously allocated and still in-use
+ chunk, or the base of its memory arena. This is ensured
+ by making all allocations from the `lowest' part of any found
+ chunk. This does not necessarily hold however for chunks
+ recycled via fastbins.
+ */
+
+ assert(prev_inuse(p));
+}
+
+
+/*
+ Properties of malloc_state.
+
+ This may be useful for debugging malloc, as well as detecting user
+ programmer errors that somehow write into malloc_state.
+
+ If you are extending or experimenting with this malloc, you can
+ probably figure out how to hack this routine to print out or
+ display chunk addresses, sizes, bins, and other instrumentation.
+*/
+void __do_check_malloc_state(void)
+{
+ mstate av = get_malloc_state();
+ int i;
+ mchunkptr p;
+ mchunkptr q;
+ mbinptr b;
+ unsigned int binbit;
+ int empty;
+ unsigned int idx;
+ size_t size;
+ unsigned long total = 0;
+ int max_fast_bin;
+
+ /* internal size_t must be no wider than pointer type */
+ assert(sizeof(size_t) <= sizeof(char*));
+
+ /* alignment is a power of 2 */
+ assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
+
+ /* cannot run remaining checks until fully initialized */
+ if (av->top == 0 || av->top == initial_top(av))
+ return;
+
+ /* pagesize is a power of 2 */
+ assert((av->pagesize & (av->pagesize-1)) == 0);
+
+ /* properties of fastbins */
+
+ /* max_fast is in allowed range */
+ assert(get_max_fast(av) <= request2size(MAX_FAST_SIZE));
+
+ max_fast_bin = fastbin_index(av->max_fast);
+
+ for (i = 0; i < NFASTBINS; ++i) {
+ p = av->fastbins[i];
+
+ /* all bins past max_fast are empty */
+ if (i > max_fast_bin)
+ assert(p == 0);
+
+ while (p != 0) {
+ /* each chunk claims to be inuse */
+ __do_check_inuse_chunk(p);
+ total += chunksize(p);
+ /* chunk belongs in this bin */
+ assert(fastbin_index(chunksize(p)) == i);
+ p = p->fd;
+ }
+ }
+
+ if (total != 0)
+ assert(have_fastchunks(av));
+ else if (!have_fastchunks(av))
+ assert(total == 0);
+
+ /* check normal bins */
+ for (i = 1; i < NBINS; ++i) {
+ b = bin_at(av,i);
+
+ /* binmap is accurate (except for bin 1 == unsorted_chunks) */
+ if (i >= 2) {
+ binbit = get_binmap(av,i);
+ empty = last(b) == b;
+ if (!binbit)
+ assert(empty);
+ else if (!empty)
+ assert(binbit);
+ }
+
+ for (p = last(b); p != b; p = p->bk) {
+ /* each chunk claims to be free */
+ __do_check_free_chunk(p);
+ size = chunksize(p);
+ total += size;
+ if (i >= 2) {
+ /* chunk belongs in bin */
+ idx = bin_index(size);
+ assert(idx == i);
+ /* lists are sorted */
+ if ((unsigned long) size >= (unsigned long)(FIRST_SORTED_BIN_SIZE)) {
+ assert(p->bk == b ||
+ (unsigned long)chunksize(p->bk) >=
+ (unsigned long)chunksize(p));
+ }
+ }
+ /* chunk is followed by a legal chain of inuse chunks */
+ for (q = next_chunk(p);
+ (q != av->top && inuse(q) &&
+ (unsigned long)(chunksize(q)) >= MINSIZE);
+ q = next_chunk(q))
+ __do_check_inuse_chunk(q);
+ }
+ }
+
+ /* top chunk is OK */
+ __do_check_chunk(av->top);
+
+ /* sanity checks for statistics */
+
+ assert(total <= (unsigned long)(av->max_total_mem));
+ assert(av->n_mmaps >= 0);
+ assert(av->n_mmaps <= av->max_n_mmaps);
+
+ assert((unsigned long)(av->sbrked_mem) <=
+ (unsigned long)(av->max_sbrked_mem));
+
+ assert((unsigned long)(av->mmapped_mem) <=
+ (unsigned long)(av->max_mmapped_mem));
+
+ assert((unsigned long)(av->max_total_mem) >=
+ (unsigned long)(av->mmapped_mem) + (unsigned long)(av->sbrked_mem));
+}
+#endif
+
+
+/* ----------- Routines dealing with system allocation -------------- */
+
+/*
+ sysmalloc handles malloc cases requiring more memory from the system.
+ On entry, it is assumed that av->top does not have enough
+ space to service request for nb bytes, thus requiring that av->top
+ be extended or replaced.
+*/
+static void* __malloc_alloc(size_t nb, mstate av)
+{
+ mchunkptr old_top; /* incoming value of av->top */
+ size_t old_size; /* its size */
+ char* old_end; /* its end address */
+
+ long size; /* arg to first MORECORE or mmap call */
+ char* brk; /* return value from MORECORE */
+
+ long correction; /* arg to 2nd MORECORE call */
+ char* snd_brk; /* 2nd return val */
+
+ size_t front_misalign; /* unusable bytes at front of new space */
+ size_t end_misalign; /* partial page left at end of new space */
+ char* aligned_brk; /* aligned offset into brk */
+
+ mchunkptr p; /* the allocated/returned chunk */
+ mchunkptr remainder; /* remainder from allocation */
+ unsigned long remainder_size; /* its size */
+
+ unsigned long sum; /* for updating stats */
+
+ size_t pagemask = av->pagesize - 1;
+
+ /*
+ If there is space available in fastbins, consolidate and retry
+ malloc from scratch rather than getting memory from system. This
+ can occur only if nb is in smallbin range so we didn't consolidate
+ upon entry to malloc. It is much easier to handle this case here
+ than in malloc proper.
+ */
+
+ if (have_fastchunks(av)) {
+ assert(in_smallbin_range(nb));
+ __malloc_consolidate(av);
+ return malloc(nb - MALLOC_ALIGN_MASK);
+ }
+
+
+ /*
+ If have mmap, and the request size meets the mmap threshold, and
+ the system supports mmap, and there are few enough currently
+ allocated mmapped regions, try to directly map this request
+ rather than expanding top.
+ */
+
+ if ((unsigned long)(nb) >= (unsigned long)(av->mmap_threshold) &&
+ (av->n_mmaps < av->n_mmaps_max)) {
+
+ char* mm; /* return value from mmap call*/
+
+ /*
+ Round up size to nearest page. For mmapped chunks, the overhead
+ is one (sizeof(size_t)) unit larger than for normal chunks, because there
+ is no following chunk whose prev_size field could be used.
+ */
+ size = (nb + (sizeof(size_t)) + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
+
+ /* Don't try if size wraps around 0 */
+ if ((unsigned long)(size) > (unsigned long)(nb)) {
+
+ mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
+