author    | Eric Andersen <andersen@codepoet.org> | 2002-09-05 05:54:26 +0000
committer | Eric Andersen <andersen@codepoet.org> | 2002-09-05 05:54:26 +0000
commit    | ccf964b4c2b384d4e005a358f781e49cd6f89c68 (patch)
tree      | 05c2b332ff646da2c0c7736f5551e0126d239874 /libc/stdlib/malloc-930716
parent    | e72144d6265637d1e34bd79f5e373b4ff98d4d29 (diff)
split out memalign and realloc
-Erik
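The mechanical core of the change: allocator state and the unlocked worker routines that were `static` to malloc.c (`_heapbase`, `_heapinfo`, `_heapindex`, `_heaplimit`, `_aligned_blocks`, `__malloc_unlocked`, `__free_unlocked`, plus the `__malloclock` mutex) become globals declared `extern` in malloc.h, so the new memalign.c and realloc.c translation units can reach them. A minimal sketch of that pattern, using hypothetical names (shared_state.h, heap_used, core_alloc_unlocked, resize_alloc are illustrations, not the uClibc symbols):

/* shared_state.h -- shared state is declared extern here and defined
 * exactly once, in core.c, mirroring what malloc.h does in this commit. */
#ifndef SHARED_STATE_H
#define SHARED_STATE_H
#include <stddef.h>
extern size_t heap_used;                /* defined in core.c */
void *core_alloc_unlocked(size_t size); /* defined in core.c */
void *resize_alloc(size_t size);        /* defined in resize.c */
#endif

/* core.c -- plays the role of malloc.c: owns the one definition of the
 * shared state and the unlocked worker. */
#include <stdlib.h>
#include "shared_state.h"
size_t heap_used = 0;
void *core_alloc_unlocked(size_t size)
{
    heap_used += size;   /* toy bookkeeping in place of the heap tables */
    return malloc(size);
}

/* resize.c -- plays the role of the new realloc.c/memalign.c: a separate
 * archive member that reaches the shared state only via the header. */
#include "shared_state.h"
void *resize_alloc(size_t size)
{
    return core_alloc_unlocked(size);
}

With each function in its own object file, a static link against libc.a that never calls realloc() or memalign() no longer drags their code in, since the linker pulls archive members in per object; that is presumably the motivation for the split.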
Diffstat (limited to 'libc/stdlib/malloc-930716')
-rw-r--r-- | libc/stdlib/malloc-930716/Makefile   |   2
-rw-r--r-- | libc/stdlib/malloc-930716/malloc.c   | 217
-rw-r--r-- | libc/stdlib/malloc-930716/malloc.h   |  27
-rw-r--r-- | libc/stdlib/malloc-930716/memalign.c |  67
-rw-r--r-- | libc/stdlib/malloc-930716/realloc.c  | 140
5 files changed, 261 insertions, 192 deletions
diff --git a/libc/stdlib/malloc-930716/Makefile b/libc/stdlib/malloc-930716/Makefile
index 9b4b90fb1..a0a5f7b90 100644
--- a/libc/stdlib/malloc-930716/Makefile
+++ b/libc/stdlib/malloc-930716/Makefile
@@ -26,7 +26,7 @@ include $(TOPDIR)Rules.mak
 # calloc.c can be found at uClibc/libc/stdlib/calloc.c
 # valloc.c can be found at uClibc/libc/stdlib/valloc.c
 
-CSRC=malloc.c
+CSRC=malloc.c memalign.c realloc.c
 COBJS=$(patsubst %.c,%.o, $(CSRC))
 OBJS=$(COBJS)
 
diff --git a/libc/stdlib/malloc-930716/malloc.c b/libc/stdlib/malloc-930716/malloc.c
index 545f0e347..db93311a2 100644
--- a/libc/stdlib/malloc-930716/malloc.c
+++ b/libc/stdlib/malloc-930716/malloc.c
@@ -17,62 +17,45 @@
 #include <unistd.h>
 #include "malloc.h"
 
-#define MIN(x,y) ({ \
-	const typeof(x) _x = (x);	\
-	const typeof(y) _y = (y);	\
-	(void) (&_x == &_y);		\
-	_x < _y ? _x : _y; })
-
-
 #ifdef __UCLIBC_HAS_THREADS__
 #include <pthread.h>
-static pthread_mutex_t malloclock = PTHREAD_MUTEX_INITIALIZER;
-# define LOCK	pthread_mutex_lock(&malloclock)
-# define UNLOCK	pthread_mutex_unlock(&malloclock);
+pthread_mutex_t __malloclock = PTHREAD_MUTEX_INITIALIZER;
+# define LOCK	pthread_mutex_lock(&__malloclock)
+# define UNLOCK	pthread_mutex_unlock(&__malloclock);
 #else
 # define LOCK
 # define UNLOCK
 #endif
 
-static void * malloc_unlocked (size_t size);
-static void free_unlocked(void *ptr);
-/* How to really get more memory. */
-static void * __morecore(long size);
-/* Pointer to the base of the first block. */
-static char *_heapbase;
+/* Stuff that is shared across .o files */
+/* Pointer to the base of the first block. */
+char *_heapbase;
 /* Block information table. */
-static union info *_heapinfo;
-
-/* Number of info entries. */
-static size_t heapsize;
-
+union info *_heapinfo;
 /* Search index in the info table. */
-static size_t _heapindex;
-
+size_t _heapindex;
 /* Limit of valid info table indices. */
-static size_t _heaplimit;
+size_t _heaplimit;
+/* List of blocks allocated with memalign or valloc */
+struct alignlist *_aligned_blocks;
+
+
+/* Stuff that is local to this .o file only */
+
+/* How to really get more memory. */
+static void * __morecore(long size);
+/* Number of info entries. */
+static size_t heapsize;
 /* Count of large blocks allocated for each fragment size. */
 static size_t _fragblocks[BLOCKLOG];
-
 /* Free lists for each fragment size. */
 static struct list _fraghead[BLOCKLOG];
-
 /* Are we experienced? */
 static int initialized;
 
-/* List of blocks allocated with memalign or valloc */
-struct alignlist
-{
-    struct alignlist *next;
-    __ptr_t aligned;	/* The address that memaligned returned.  */
-    __ptr_t exact;	/* The address that malloc returned.  */
-};
-static struct alignlist *_aligned_blocks;
-
 /* Aligned allocation.
  * Called within the lock in initialize() and morecore(),
@@ -141,7 +124,7 @@ static void * morecore(size_t size)
 	newinfo[BLOCK(oldinfo)].busy.info.size
 	    = BLOCKIFY(heapsize * sizeof (union info));
 	_heapinfo = newinfo;
-	free_unlocked(oldinfo);
+	__free_unlocked(oldinfo);
 	heapsize = newsize;
     }
 
@@ -166,12 +149,12 @@ void * malloc (size_t size)
 {
     void * ptr;
     LOCK;
-    ptr = malloc_unlocked(size);
+    ptr = __malloc_unlocked(size);
     UNLOCK;
     return(ptr);
 }
 
-static void * malloc_unlocked (size_t size)
+void * __malloc_unlocked (size_t size)
 {
     void *result;
     size_t log, block, blocks, i, lastblocks, start;
@@ -216,7 +199,7 @@ static void * malloc_unlocked (size_t size)
 	} else {
 	    /* No free fragments of the desired size, so get a new block
 	       and break it into fragments, returning the first. */
-	    result = malloc_unlocked(BLOCKSIZE);
+	    result = __malloc_unlocked(BLOCKSIZE);
 	    if (!result) {
 		return NULL;
 	    }
@@ -327,11 +310,11 @@ void free(void *ptr)
 	}
     }
 
-    free_unlocked(ptr);
+    __free_unlocked(ptr);
     UNLOCK;
 }
 
-static void free_unlocked(void *ptr)
+void __free_unlocked(void *ptr)
 {
     int block, blocks, i, type;
     struct list *prev, *next;
@@ -418,7 +401,7 @@ static void free_unlocked(void *ptr)
 	    next->prev = prev->prev;
 	    _heapinfo[block].busy.type = 0;
 	    _heapinfo[block].busy.info.size = 1;
-	    free_unlocked(ADDRESS(block));
+	    __free_unlocked(ADDRESS(block));
 	} else if (_heapinfo[block].busy.info.frag.nfree) {
 	    /* If some fragments of this block are free, link this fragment
 	       into the fragment list after the first free fragment of
@@ -449,151 +432,3 @@
 	}
     }
 }
 
-/* Resize the given region to the new size, returning a pointer
-   to the (possibly moved) region. This is optimized for speed;
-   some benchmarks seem to indicate that greater compactness is
-   achieved by unconditionally allocating and copying to a
-   new region. */
-void * realloc (void *ptr, size_t size)
-{
-    void *result, *previous;
-    size_t block, blocks, type;
-    size_t oldlimit;
-
-    if (!ptr)
-	return malloc(size);
-    if (!size) {
-	LOCK;
-	free_unlocked(ptr);
-	result = malloc_unlocked(0);
-	UNLOCK;
-	return(result);
-    }
-
-    LOCK;
-    block = BLOCK(ptr);
-
-    switch (type = _heapinfo[block].busy.type) {
-    case 0:
-	/* Maybe reallocate a large block to a small fragment. */
-	if (size <= BLOCKSIZE / 2) {
-	    if ((result = malloc_unlocked(size)) != NULL) {
-		memcpy(result, ptr, size);
-		free_unlocked(ptr);
-	    }
-	    UNLOCK;
-	    return result;
-	}
-
-	/* The new size is a large allocation as well; see if
-	   we can hold it in place. */
-	blocks = BLOCKIFY(size);
-	if (blocks < _heapinfo[block].busy.info.size) {
-	    /* The new size is smaller; return excess memory
-	       to the free list. */
-	    _heapinfo[block + blocks].busy.type = 0;
-	    _heapinfo[block + blocks].busy.info.size
-		= _heapinfo[block].busy.info.size - blocks;
-	    _heapinfo[block].busy.info.size = blocks;
-	    free_unlocked(ADDRESS(block + blocks));
-	    UNLOCK;
-	    return ptr;
-	} else if (blocks == _heapinfo[block].busy.info.size) {
-	    /* No size change necessary. */
-	    UNLOCK;
-	    return ptr;
-	} else {
-	    /* Won't fit, so allocate a new region that will. Free
-	       the old region first in case there is sufficient adjacent
-	       free space to grow without moving. */
-	    blocks = _heapinfo[block].busy.info.size;
-	    /* Prevent free from actually returning memory to the system. */
-	    oldlimit = _heaplimit;
-	    _heaplimit = 0;
-	    free_unlocked(ptr);
-	    _heaplimit = oldlimit;
-	    result = malloc_unlocked(size);
-	    if (!result) {
-		/* Now we're really in trouble. We have to unfree
-		   the thing we just freed. Unfortunately it might
-		   have been coalesced with its neighbors. */
-		if (_heapindex == block)
-		    malloc_unlocked(blocks * BLOCKSIZE);
-		else {
-		    previous = malloc_unlocked((block - _heapindex) * BLOCKSIZE);
-		    malloc_unlocked(blocks * BLOCKSIZE);
-		    free_unlocked(previous);
-		}
-		UNLOCK;
-		return NULL;
-	    }
-	    if (ptr != result)
-		memmove(result, ptr, blocks * BLOCKSIZE);
-	    UNLOCK;
-	    return result;
-	}
-	break;
-
-    default:
-	/* Old size is a fragment; type is logarithm to base two of
-	   the fragment size. */
-	if ((size > 1 << (type - 1)) && (size <= 1 << type)) {
-	    /* New size is the same kind of fragment. */
-	    UNLOCK;
-	    return ptr;
-	}
-	else {
-	    /* New size is different; allocate a new space, and copy
-	       the lesser of the new size and the old. */
-	    result = malloc_unlocked(size);
-	    if (!result) {
-		UNLOCK;
-		return NULL;
-	    }
-	    memcpy(result, ptr, MIN(size, (size_t)(1 << type)));
-	    free_unlocked(ptr);
-	    UNLOCK;
-	    return result;
-	}
-	break;
-    }
-    UNLOCK;
-}
-
-__ptr_t memalign (size_t alignment, size_t size)
-{
-    __ptr_t result;
-    unsigned long int adj;
-
-    result = malloc (size + alignment - 1);
-    if (result == NULL)
-	return NULL;
-    adj = (unsigned long int) ((unsigned long int) ((char *) result -
-	    (char *) NULL)) % alignment;
-    if (adj != 0)
-    {
-	struct alignlist *l;
-	LOCK;
-	for (l = _aligned_blocks; l != NULL; l = l->next)
-	    if (l->aligned == NULL)
-		/* This slot is free.  Use it.  */
-		break;
-	if (l == NULL)
-	{
-	    l = (struct alignlist *) malloc (sizeof (struct alignlist));
-	    if (l == NULL) {
-		free_unlocked (result);
-		UNLOCK;
-		return NULL;
-	    }
-	    l->next = _aligned_blocks;
-	    _aligned_blocks = l;
-	}
-	l->exact = result;
-	result = l->aligned = (char *) result + alignment - adj;
-	UNLOCK;
-    }
-
-    return result;
-}
-
diff --git a/libc/stdlib/malloc-930716/malloc.h b/libc/stdlib/malloc-930716/malloc.h
index fc21a13cc..bd315f788 100644
--- a/libc/stdlib/malloc-930716/malloc.h
+++ b/libc/stdlib/malloc-930716/malloc.h
@@ -10,6 +10,15 @@
 
 #include <sys/cdefs.h>
 
+
+#define MIN(x,y) ({ \
+	const typeof(x) _x = (x);	\
+	const typeof(y) _y = (y);	\
+	(void) (&_x == &_y);		\
+	_x < _y ? _x : _y; })
+
+
+
 /* The allocator divides the heap into blocks of fixed size; large
    requests receive one or more whole blocks, and small requests
    receive a fragment of a block. Fragment sizes are powers of two,
@@ -60,3 +69,21 @@ struct list {
     struct list *prev;
 };
 
+/* List of blocks allocated with memalign or valloc */
+struct alignlist
+{
+    struct alignlist *next;
+    __ptr_t aligned;	/* The address that memaligned returned.  */
+    __ptr_t exact;	/* The address that malloc returned.  */
+};
+extern struct alignlist *_aligned_blocks;
+extern char *_heapbase;
+extern union info *_heapinfo;
+extern size_t _heapindex;
+extern size_t _heaplimit;
+
+
+extern void *__malloc_unlocked (size_t size);
+extern void __free_unlocked(void *ptr);
+
+
diff --git a/libc/stdlib/malloc-930716/memalign.c b/libc/stdlib/malloc-930716/memalign.c
new file mode 100644
index 000000000..eea460aad
--- /dev/null
+++ b/libc/stdlib/malloc-930716/memalign.c
@@ -0,0 +1,67 @@
+/* malloc.c - C standard library routine.
+   Copyright (c) 1989, 1993 Michael J. Haertel
+   You may redistribute this library under the terms of the
+   GNU Library General Public License (version 2 or any later
+   version) as published by the Free Software Foundation.
+   THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED
+   WARRANTY. IN PARTICULAR, THE AUTHOR MAKES NO REPRESENTATION OR
+   WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY OF THIS
+   SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. */
+
+#define _GNU_SOURCE
+#include <features.h>
+#include <limits.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include "malloc.h"
+
+#ifdef __UCLIBC_HAS_THREADS__
+#include <pthread.h>
+extern pthread_mutex_t __malloclock;
+# define LOCK	pthread_mutex_lock(&__malloclock)
+# define UNLOCK	pthread_mutex_unlock(&__malloclock);
+#else
+# define LOCK
+# define UNLOCK
+#endif
+
+
+__ptr_t memalign (size_t alignment, size_t size)
+{
+    __ptr_t result;
+    unsigned long int adj;
+
+    result = malloc (size + alignment - 1);
+    if (result == NULL)
+	return NULL;
+    adj = (unsigned long int) ((unsigned long int) ((char *) result -
+	    (char *) NULL)) % alignment;
+    if (adj != 0)
+    {
+	struct alignlist *l;
+	LOCK;
+	for (l = _aligned_blocks; l != NULL; l = l->next)
+	    if (l->aligned == NULL)
+		/* This slot is free.  Use it.  */
+		break;
+	if (l == NULL)
+	{
+	    l = (struct alignlist *) malloc (sizeof (struct alignlist));
+	    if (l == NULL) {
+		__free_unlocked (result);
+		UNLOCK;
+		return NULL;
+	    }
+	    l->next = _aligned_blocks;
+	    _aligned_blocks = l;
+	}
+	l->exact = result;
+	result = l->aligned = (char *) result + alignment - adj;
+	UNLOCK;
+    }
+
+    return result;
+}
+
diff --git a/libc/stdlib/malloc-930716/realloc.c b/libc/stdlib/malloc-930716/realloc.c
new file mode 100644
index 000000000..2b486ced5
--- /dev/null
+++ b/libc/stdlib/malloc-930716/realloc.c
@@ -0,0 +1,140 @@
+/* malloc.c - C standard library routine.
+   Copyright (c) 1989, 1993 Michael J. Haertel
+   You may redistribute this library under the terms of the
+   GNU Library General Public License (version 2 or any later
+   version) as published by the Free Software Foundation.
+   THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED
+   WARRANTY. IN PARTICULAR, THE AUTHOR MAKES NO REPRESENTATION OR
+   WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY OF THIS
+   SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. */
+
+#define _GNU_SOURCE
+#include <features.h>
+#include <limits.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include "malloc.h"
+
+#ifdef __UCLIBC_HAS_THREADS__
+#include <pthread.h>
+extern pthread_mutex_t __malloclock;
+# define LOCK	pthread_mutex_lock(&__malloclock)
+# define UNLOCK	pthread_mutex_unlock(&__malloclock);
+#else
+# define LOCK
+# define UNLOCK
+#endif
+
+/* Resize the given region to the new size, returning a pointer
+   to the (possibly moved) region. This is optimized for speed;
+   some benchmarks seem to indicate that greater compactness is
+   achieved by unconditionally allocating and copying to a
+   new region. */
+void * realloc (void *ptr, size_t size)
+{
+    void *result, *previous;
+    size_t block, blocks, type;
+    size_t oldlimit;
+
+    if (!ptr)
+	return malloc(size);
+    if (!size) {
+	LOCK;
+	__free_unlocked(ptr);
+	result = __malloc_unlocked(0);
+	UNLOCK;
+	return(result);
+    }
+
+    LOCK;
+    block = BLOCK(ptr);
+
+    switch (type = _heapinfo[block].busy.type) {
+    case 0:
+	/* Maybe reallocate a large block to a small fragment. */
+	if (size <= BLOCKSIZE / 2) {
+	    if ((result = __malloc_unlocked(size)) != NULL) {
+		memcpy(result, ptr, size);
+		__free_unlocked(ptr);
+	    }
+	    UNLOCK;
+	    return result;
+	}
+
+	/* The new size is a large allocation as well; see if
+	   we can hold it in place. */
+	blocks = BLOCKIFY(size);
+	if (blocks < _heapinfo[block].busy.info.size) {
+	    /* The new size is smaller; return excess memory
+	       to the free list. */
+	    _heapinfo[block + blocks].busy.type = 0;
+	    _heapinfo[block + blocks].busy.info.size
+		= _heapinfo[block].busy.info.size - blocks;
+	    _heapinfo[block].busy.info.size = blocks;
+	    __free_unlocked(ADDRESS(block + blocks));
+	    UNLOCK;
+	    return ptr;
+	} else if (blocks == _heapinfo[block].busy.info.size) {
+	    /* No size change necessary. */
+	    UNLOCK;
+	    return ptr;
+	} else {
+	    /* Won't fit, so allocate a new region that will. Free
+	       the old region first in case there is sufficient adjacent
+	       free space to grow without moving. */
+	    blocks = _heapinfo[block].busy.info.size;
+	    /* Prevent free from actually returning memory to the system. */
+	    oldlimit = _heaplimit;
+	    _heaplimit = 0;
+	    __free_unlocked(ptr);
+	    _heaplimit = oldlimit;
+	    result = __malloc_unlocked(size);
+	    if (!result) {
+		/* Now we're really in trouble. We have to unfree
+		   the thing we just freed. Unfortunately it might
+		   have been coalesced with its neighbors. */
+		if (_heapindex == block)
+		    __malloc_unlocked(blocks * BLOCKSIZE);
+		else {
+		    previous = __malloc_unlocked((block - _heapindex) * BLOCKSIZE);
+		    __malloc_unlocked(blocks * BLOCKSIZE);
+		    __free_unlocked(previous);
+		}
+		UNLOCK;
+		return NULL;
+	    }
+	    if (ptr != result)
+		memmove(result, ptr, blocks * BLOCKSIZE);
+	    UNLOCK;
+	    return result;
+	}
+	break;
+
+    default:
+	/* Old size is a fragment; type is logarithm to base two of
+	   the fragment size. */
+	if ((size > 1 << (type - 1)) && (size <= 1 << type)) {
+	    /* New size is the same kind of fragment. */
+	    UNLOCK;
+	    return ptr;
+	}
+	else {
+	    /* New size is different; allocate a new space, and copy
+	       the lesser of the new size and the old. */
+	    result = __malloc_unlocked(size);
+	    if (!result) {
+		UNLOCK;
+		return NULL;
+	    }
+	    memcpy(result, ptr, MIN(size, (size_t)(1 << type)));
+	    __free_unlocked(ptr);
+	    UNLOCK;
+	    return result;
+	}
+	break;
+    }
+    UNLOCK;
+}
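For reference, the alignment trick memalign.c relies on: over-allocate by `alignment - 1` bytes, take the returned address modulo `alignment`, and when the remainder `adj` is nonzero bump the pointer forward by `alignment - adj`; the exact/aligned pair is then recorded in `_aligned_blocks` so that `free()` can map the aligned pointer back to the block malloc actually returned. A standalone sketch of just that arithmetic (demo_memalign is a hypothetical name for illustration, not the uClibc function):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical demo: returns an address aligned to `alignment` inside an
 * over-allocated block; *exact_out receives the pointer to pass to free(). */
static void *demo_memalign(size_t alignment, size_t size, void **exact_out)
{
    /* Over-allocate so an aligned address must fall inside the block. */
    char *exact = malloc(size + alignment - 1);
    uintptr_t adj;

    if (exact == NULL)
        return NULL;
    *exact_out = exact;
    adj = (uintptr_t) exact % alignment;  /* distance past the aligned address */
    /* Already aligned: use the block as-is; otherwise step forward by
       (alignment - adj) bytes, the same arithmetic memalign.c uses. */
    return adj == 0 ? (void *) exact : (void *) (exact + (alignment - adj));
}

int main(void)
{
    void *exact;
    void *p = demo_memalign(64, 100, &exact);

    if (p != NULL) {
        printf("exact=%p aligned=%p remainder=%zu\n",
               exact, p, (size_t) ((uintptr_t) p % 64));
        free(exact);  /* the real code records the exact/aligned pair in
                         _aligned_blocks so plain free(aligned) can do this */
    }
    return 0;
}

The price of the trick is the permanent `alignment - 1` bytes of slack per aligned allocation plus a walk of `_aligned_blocks` in `free()`, which is why the list recycles slots (an entry with `l->aligned == NULL` is free for reuse).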