Diffstat (limited to 'libc/stdlib/malloc/malloc.c')
-rw-r--r--  libc/stdlib/malloc/malloc.c  20
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/libc/stdlib/malloc/malloc.c b/libc/stdlib/malloc/malloc.c
index ce74c5608..0caf012a2 100644
--- a/libc/stdlib/malloc/malloc.c
+++ b/libc/stdlib/malloc/malloc.c
@@ -26,7 +26,8 @@ libc_hidden_proto(sbrk)
/* The malloc heap. We provide a bit of initial static space so that
programs can do a little mallocing without mmaping in more space. */
HEAP_DECLARE_STATIC_FREE_AREA (initial_fa, 256);
-struct heap __malloc_heap = HEAP_INIT_WITH_FA (initial_fa);
+struct heap_free_area *__malloc_heap = HEAP_INIT_WITH_FA (initial_fa);
+malloc_mutex_t __malloc_heap_lock = PTHREAD_MUTEX_INITIALIZER;
#if defined(MALLOC_USE_LOCKING) && defined(MALLOC_USE_SBRK)
/* A lock protecting our use of sbrk. */
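This hunk replaces the old "struct heap" global, which carried its own lock, with a bare free-area pointer and a mutex declared beside it. A minimal sketch of that layout, assuming plain pthread types; struct free_area, arena and arena_lock are illustrative names, not the uClibc identifiers:

#include <pthread.h>
#include <stddef.h>

/* Stand-in for struct heap_free_area: one node in the heap's free list. */
struct free_area {
	size_t size;
	struct free_area *next;
};

/* A small static bootstrap area, so early allocations need no sbrk/mmap
   (the role HEAP_DECLARE_STATIC_FREE_AREA plays above). */
static struct free_area initial_fa = { .size = 256, .next = NULL };

/* The heap state is now just a pointer to its free list ... */
static struct free_area *arena = &initial_fa;
/* ... and the lock lives next to it instead of inside a struct heap. */
static pthread_mutex_t arena_lock = PTHREAD_MUTEX_INITIALIZER;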
@@ -43,12 +44,13 @@ struct malloc_mmb *__malloc_mmapped_blocks = 0;
them from the main heap, but that tends to cause heap fragmentation in
annoying ways. */
HEAP_DECLARE_STATIC_FREE_AREA (initial_mmb_fa, 48); /* enough for 3 mmbs */
-struct heap __malloc_mmb_heap = HEAP_INIT_WITH_FA (initial_mmb_fa);
+struct heap_free_area *__malloc_mmb_heap = HEAP_INIT_WITH_FA (initial_mmb_fa);
+malloc_mutex_t __malloc_mmb_heap_lock = PTHREAD_MUTEX_INITIALIZER;
#endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
static void *
-malloc_from_heap (size_t size, struct heap *heap)
+malloc_from_heap (size_t size, struct heap_free_area *heap, malloc_mutex_t *heap_lock)
{
void *mem;
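With the lock split out of the heap object, malloc_from_heap now receives both pieces explicitly, so one routine can serve any heap/lock pair. A hedged sketch of a helper with that shape; the bump-allocator body and all names are illustrative, not the uClibc implementation:

#include <pthread.h>
#include <stddef.h>

/* A trivial arena, parameterized the same way as the patched
   malloc_from_heap: by the heap state *and* its lock. */
struct bump_arena { char *base; size_t used; size_t size; };

static void *
alloc_from_arena (size_t size, struct bump_arena *arena, pthread_mutex_t *lock)
{
	void *mem = NULL;

	pthread_mutex_lock (lock);
	if (arena->size - arena->used >= size) {
		mem = arena->base + arena->used;
		arena->used += size;
	}
	pthread_mutex_unlock (lock);

	return mem;
}

Passing the lock as a separate argument is what lets the same routine serve both the main heap and the mmap-bookkeeping heap further down, each guarded by its own mutex.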
@@ -57,12 +59,12 @@ malloc_from_heap (size_t size, struct heap *heap)
/* Include extra space to record the size of the allocated block. */
size += MALLOC_HEADER_SIZE;
- __heap_lock (heap);
+ __pthread_mutex_lock (heap_lock);
/* First try to get memory that's already in our heap. */
mem = __heap_alloc (heap, &size);
- __heap_unlock (heap);
+ __pthread_mutex_unlock (heap_lock);
if (unlikely (! mem))
/* We couldn't allocate from the heap, so grab some more
@@ -126,7 +128,7 @@ malloc_from_heap (size_t size, struct heap *heap)
(long)block, (long)block + block_size, block_size);
/* Get back the heap lock. */
- __heap_lock (heap);
+ __pthread_mutex_lock (heap_lock);
/* Put BLOCK into the heap. */
__heap_free (heap, block, block_size);
@@ -136,7 +138,7 @@ malloc_from_heap (size_t size, struct heap *heap)
/* Try again to allocate. */
mem = __heap_alloc (heap, &size);
- __heap_unlock (heap);
+ __pthread_mutex_unlock (heap_lock);
#if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
/* Insert a record of BLOCK in sorted order into the
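Taken together, the locking hunks keep the mutex held only while the free list itself is touched: it is dropped across the call that obtains more memory from the system, then re-taken to donate the new block and retry the allocation. A compilable sketch of that shape, using a toy free list and raw mmap in place of uClibc's __heap_alloc/__heap_free and its sbrk/mmap machinery; every name here is made up:

#define _GNU_SOURCE           /* for MAP_ANONYMOUS on some systems */
#include <pthread.h>
#include <stddef.h>
#include <sys/mman.h>

struct toy_fa { size_t size; struct toy_fa *next; };

/* First-fit pop from the free list; caller must hold the lock. */
static void *
toy_alloc (struct toy_fa **heap, size_t size)
{
	struct toy_fa **p;
	for (p = heap; *p != NULL; p = &(*p)->next)
		if ((*p)->size >= size) {
			void *mem = *p;
			*p = (*p)->next;
			return mem;
		}
	return NULL;
}

/* Push a block onto the free list; caller must hold the lock. */
static void
toy_free (struct toy_fa **heap, void *block, size_t size)
{
	struct toy_fa *fa = block;
	fa->size = size;
	fa->next = *heap;
	*heap = fa;
}

static void *
toy_malloc_from_heap (size_t size, struct toy_fa **heap, pthread_mutex_t *lock)
{
	void *mem;

	/* Fast path: only the free-list walk runs under the lock. */
	pthread_mutex_lock (lock);
	mem = toy_alloc (heap, size);
	pthread_mutex_unlock (lock);

	if (mem == NULL) {
		/* Slow path: fetch a fresh block with NO heap lock held,
		   so other threads can keep allocating meanwhile. */
		size_t block_size = size > 64 * 1024 ? size : 64 * 1024;
		void *block = mmap (NULL, block_size, PROT_READ | PROT_WRITE,
				    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (block == MAP_FAILED)
			return NULL;

		/* Re-take the lock, donate the block to the heap, retry. */
		pthread_mutex_lock (lock);
		toy_free (heap, block, block_size);
		mem = toy_alloc (heap, size);
		pthread_mutex_unlock (lock);
	}
	return mem;
}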
@@ -148,7 +150,7 @@ malloc_from_heap (size_t size, struct heap *heap)
if (block < mmb->mem)
break;
- new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap);
+ new_mmb = malloc_from_heap (sizeof *new_mmb, __malloc_mmb_heap, &__malloc_mmb_heap_lock);
new_mmb->next = mmb;
new_mmb->mem = block;
new_mmb->size = block_size;
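When __UCLIBC_UCLINUX_BROKEN_MUNMAP__ is in effect, every mmapped block also gets a bookkeeping record, and this hunk shows the record itself being allocated with the same routine, but from the dedicated __malloc_mmb_heap under its own __malloc_mmb_heap_lock. A rough sketch of that call pattern, reusing the toy helper from the previous sketch; the record layout below is only a stand-in for the real struct malloc_mmb:

#include <pthread.h>
#include <stddef.h>

struct toy_fa;                              /* free-list node, as sketched above */
extern void *toy_malloc_from_heap (size_t, struct toy_fa **, pthread_mutex_t *);

/* Stand-in for struct malloc_mmb: one record per mmapped block. */
struct mmb_record { struct mmb_record *next; void *mem; size_t size; };

/* The records live in their own small arena with its own lock ... */
static struct toy_fa *mmb_arena;
static pthread_mutex_t mmb_arena_lock = PTHREAD_MUTEX_INITIALIZER;

/* ... so recording a freshly mmapped BLOCK allocates from that arena,
   not from the heap the caller is in the middle of growing. */
static struct mmb_record *
record_mmapped_block (void *block, size_t block_size, struct mmb_record *head)
{
	struct mmb_record *new_mmb =
		toy_malloc_from_heap (sizeof *new_mmb, &mmb_arena, &mmb_arena_lock);

	if (new_mmb != NULL) {
		new_mmb->next = head;
		new_mmb->mem  = block;
		new_mmb->size = block_size;
	}
	return new_mmb;
}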
@@ -207,7 +209,7 @@ malloc (size_t size)
if (unlikely(((unsigned long)size > (unsigned long)(MALLOC_HEADER_SIZE*-2))))
goto oom;
- mem = malloc_from_heap (size, &__malloc_heap);
+ mem = malloc_from_heap (size, __malloc_heap, &__malloc_heap_lock);
if (unlikely (!mem))
{
oom:
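As an aside, the unchanged size check in this last hunk is the overflow guard for the later size += MALLOC_HEADER_SIZE: read as an unsigned long, MALLOC_HEADER_SIZE * -2 is a value just below ULONG_MAX, so any request above it is refused before adding the header (and alignment slack) could make the sum wrap around. A small standalone illustration; HEADER_SIZE is an arbitrary stand-in value:

#include <limits.h>
#include <stdio.h>

#define HEADER_SIZE 8   /* illustrative stand-in for MALLOC_HEADER_SIZE */

/* Nonzero if a request of SIZE bytes is too large to survive
   "size += HEADER_SIZE" (plus alignment rounding) without wrapping. */
static int
too_big (unsigned long size)
{
	return size > (unsigned long) (HEADER_SIZE * -2);
}

int main (void)
{
	printf ("threshold          = %lu\n", (unsigned long) (HEADER_SIZE * -2));
	printf ("too_big(1024)      = %d\n", too_big (1024));       /* prints 0 */
	printf ("too_big(ULONG_MAX) = %d\n", too_big (ULONG_MAX));  /* prints 1 */
	return 0;
}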