| author | Eric Andersen <andersen@codepoet.org> | 2003-12-30 10:40:49 +0000 |
|---|---|---|
| committer | Eric Andersen <andersen@codepoet.org> | 2003-12-30 10:40:49 +0000 |
| commit | 8d532c51318bad2436880ecac972c9dfa3996c9b (patch) | |
| tree | 821863358734242feb99643e9d66ee9b175ad464 /libc/stdlib/malloc-standard/memalign.c | |
| parent | 4c9086ee4afde4257a4b4a8f55e05932d1b6acfd (diff) | |
Rework malloc. The new default implementation is based on dlmalloc from Doug
Lea. It is about 2x faster than the old malloc-930716 and behaves much better
-- it properly releases memory back to the system, and it uses a combination
of brk() for small allocations and mmap() for larger allocations.
-Erik
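For readers unfamiliar with the scheme the commit message describes: small requests are carved out of the heap region grown with brk()/sbrk(), while large requests get their own anonymous mmap() region that can be handed back to the kernel wholesale when freed. The following is a minimal sketch of that split only; the threshold constant and the function name are illustrative, not uClibc's actual internals, and no chunk bookkeeping is done.

```c
#define _DEFAULT_SOURCE
#include <stdint.h>
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

/* Hypothetical cutoff for illustration; the real allocator's mmap
 * threshold is a separate tunable (128 KiB is a common default). */
#define TOY_MMAP_THRESHOLD (128 * 1024)

/* Toy front-end showing only the brk-vs-mmap decision; nothing
 * allocated here can be freed properly. */
static void *toy_alloc(size_t bytes)
{
    if (bytes >= TOY_MMAP_THRESHOLD) {
        /* Large request: private anonymous mapping, returned to the
         * kernel in a single munmap() call when freed. */
        void *p = mmap(NULL, bytes, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return (p == MAP_FAILED) ? NULL : p;
    }
    /* Small request: extend the program break; a real allocator carves
     * this region into chunks and trims the break when the top chunk
     * becomes free, which is how memory gets released back. */
    void *p = sbrk((intptr_t)bytes);
    return (p == (void *)-1) ? NULL : p;
}
```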
Diffstat (limited to 'libc/stdlib/malloc-standard/memalign.c')
| -rw-r--r-- | libc/stdlib/malloc-standard/memalign.c | 126 |
1 file changed, 126 insertions(+), 0 deletions(-)
diff --git a/libc/stdlib/malloc-standard/memalign.c b/libc/stdlib/malloc-standard/memalign.c
new file mode 100644
index 000000000..bd9536272
--- /dev/null
+++ b/libc/stdlib/malloc-standard/memalign.c
@@ -0,0 +1,126 @@
+/*
+  This is a version (aka dlmalloc) of malloc/free/realloc written by
+  Doug Lea and released to the public domain.  Use, modify, and
+  redistribute this code without permission or acknowledgement in any
+  way you wish.  Send questions, comments, complaints, performance
+  data, etc to dl@cs.oswego.edu
+
+  VERSION 2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)
+
+  Note: There may be an updated version of this malloc obtainable at
+           ftp://gee.cs.oswego.edu/pub/misc/malloc.c
+  Check before installing!
+
+  Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
+*/
+
+#include <features.h>
+#include <stddef.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include "malloc.h"
+
+
+/* ------------------------------ memalign ------------------------------ */
+void* memalign(size_t alignment, size_t bytes)
+{
+    size_t nb;                    /* padded request size */
+    char* m;                      /* memory returned by malloc call */
+    mchunkptr p;                  /* corresponding chunk */
+    char* brk;                    /* alignment point within p */
+    mchunkptr newp;               /* chunk to return */
+    size_t newsize;               /* its size */
+    size_t leadsize;              /* leading space before alignment point */
+    mchunkptr remainder;          /* spare room at end to split off */
+    unsigned long remainder_size; /* its size */
+    size_t size;
+
+    /* If need less alignment than we give anyway, just relay to malloc */
+
+    if (alignment <= MALLOC_ALIGNMENT) return malloc(bytes);
+
+    /* Otherwise, ensure that it is at least a minimum chunk size */
+
+    if (alignment < MINSIZE) alignment = MINSIZE;
+
+    /* Make sure alignment is power of 2 (in case MINSIZE is not). */
+    if ((alignment & (alignment - 1)) != 0) {
+        size_t a = MALLOC_ALIGNMENT * 2;
+        while ((unsigned long)a < (unsigned long)alignment) a <<= 1;
+        alignment = a;
+    }
+
+    LOCK;
+    checked_request2size(bytes, nb);
+
+    /* Strategy: find a spot within that chunk that meets the alignment
+     * request, and then possibly free the leading and trailing space. */
+
+
+    /* Call malloc with worst case padding to hit alignment. */
+
+    m = (char*)(malloc(nb + alignment + MINSIZE));
+
+    if (m == 0) {
+        UNLOCK;
+        return 0; /* propagate failure */
+    }
+
+    p = mem2chunk(m);
+
+    if ((((unsigned long)(m)) % alignment) != 0) { /* misaligned */
+
+        /*
+          Find an aligned spot inside chunk.  Since we need to give back
+          leading space in a chunk of at least MINSIZE, if the first
+          calculation places us at a spot with less than MINSIZE leader,
+          we can move to the next aligned spot -- we've allocated enough
+          total room so that this is always possible.
+        */
+
+        brk = (char*)mem2chunk((unsigned long)(((unsigned long)(m + alignment - 1)) &
+                    -((signed long) alignment)));
+        if ((unsigned long)(brk - (char*)(p)) < MINSIZE)
+            brk += alignment;
+
+        newp = (mchunkptr)brk;
+        leadsize = brk - (char*)(p);
+        newsize = chunksize(p) - leadsize;
+
+        /* For mmapped chunks, just adjust offset */
+        if (chunk_is_mmapped(p)) {
+            newp->prev_size = p->prev_size + leadsize;
+            set_head(newp, newsize|IS_MMAPPED);
+            UNLOCK;
+            return chunk2mem(newp);
+        }
+
+        /* Otherwise, give back leader, use the rest */
+        set_head(newp, newsize | PREV_INUSE);
+        set_inuse_bit_at_offset(newp, newsize);
+        set_head_size(p, leadsize);
+        free(chunk2mem(p));
+        p = newp;
+
+        assert (newsize >= nb &&
+                (((unsigned long)(chunk2mem(p))) % alignment) == 0);
+    }
+
+    /* Also give back spare room at the end */
+    if (!chunk_is_mmapped(p)) {
+        size = chunksize(p);
+        if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
+            remainder_size = size - nb;
+            remainder = chunk_at_offset(p, nb);
+            set_head(remainder, remainder_size | PREV_INUSE);
+            set_head_size(p, nb);
+            free(chunk2mem(remainder));
+        }
+    }
+
+    check_inuse_chunk(p);
+    UNLOCK;
+    return chunk2mem(p);
+}
+
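For completeness, here is a small usage sketch of the interface this file adds. memalign() is a non-standard extension declared in malloc.h on uClibc and glibc; the test scaffolding below is ours, not part of the commit.

```c
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>   /* declares memalign() on uClibc and glibc */

int main(void)
{
    /* Ask for 1000 bytes on a 64-byte boundary (e.g. a cache line). */
    void *buf = memalign(64, 1000);
    if (buf == NULL)
        return 1;

    /* The returned address is a multiple of the requested alignment. */
    printf("aligned: %s\n",
           ((unsigned long)buf % 64) == 0 ? "yes" : "no");

    /* Chunks from memalign() are released with ordinary free(). */
    free(buf);
    return 0;
}
```

As the diff shows, an alignment that is not a power of two is rounded up to the next power of two, and anything at or below MALLOC_ALIGNMENT falls straight through to malloc(), so the worst-case padding of nb + alignment + MINSIZE is only paid when extra alignment is actually needed.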