author     Miles Bader <miles@lsi.nec.co.jp>    2002-07-25 01:58:57 +0000
committer  Miles Bader <miles@lsi.nec.co.jp>    2002-07-25 01:58:57 +0000
commit     056f9d98941eb98e453bf4fa308f28b892525baf (patch)
tree       9adaefa6923b1949e06c4a81cf889976a7e8bb18 /libc/stdlib
parent     255cd531d67d4c5d110409e2a24e2aa5a6249a7a (diff)
Redo the locking, so that it may actually work.  Now locking is done at
the malloc/free level, not within the heap abstraction, and there's a
separate lock to control sbrk access.  Also, get rid of the separate
`unmap_free_area' function in free.c, and just put the code in the
`free' function directly, which saves a bunch of space (even compared
to using an inline function) for some reason.
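
As a hedged illustration of the scheme the message describes -- a heap-level
lock taken in malloc/free plus a separate lock serializing sbrk -- the
choreography reduces to something like the sketch below.  It assumes
pthreads; the names and bodies are illustrative, not the patch's code.

    #include <stddef.h>
    #include <pthread.h>
    #include <unistd.h>

    static pthread_mutex_t malloc_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t sbrk_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Grow the heap; called with MALLOC_LOCK held, and returns with it
       held again, but drops it across the system call so that other
       threads can keep allocating in the meantime. */
    static void *grow_heap (size_t size)
    {
      void *block;

      pthread_mutex_lock (&sbrk_lock);     /* taken while malloc_lock is held */
      pthread_mutex_unlock (&malloc_lock); /* not held during the syscall */

      block = sbrk (size);

      pthread_mutex_unlock (&sbrk_lock);
      pthread_mutex_lock (&malloc_lock);   /* re-acquire before touching the heap */

      return block;
    }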
Diffstat (limited to 'libc/stdlib')
-rw-r--r--  libc/stdlib/malloc/free.c           193
-rw-r--r--  libc/stdlib/malloc/heap.h            19
-rw-r--r--  libc/stdlib/malloc/heap_alloc.c       4
-rw-r--r--  libc/stdlib/malloc/heap_alloc_at.c    4
-rw-r--r--  libc/stdlib/malloc/heap_free.c        6
-rw-r--r--  libc/stdlib/malloc/malloc.c          49
-rw-r--r--  libc/stdlib/malloc/malloc.h          38
-rw-r--r--  libc/stdlib/malloc/realloc.c          3
8 files changed, 186 insertions, 130 deletions
diff --git a/libc/stdlib/malloc/free.c b/libc/stdlib/malloc/free.c
index 4aa21cb84..f7d8fc18d 100644
--- a/libc/stdlib/malloc/free.c
+++ b/libc/stdlib/malloc/free.c
@@ -18,93 +18,6 @@
#include "malloc.h"
#include "heap.h"
-
-/* Try to release the free-area FA in HEAP back to the system. */
-static void
-unmap_free_area (struct heap *heap, struct heap_free_area *fa)
-{
- unsigned long start, end;
-#ifndef MALLOC_USE_SBRK
- unsigned long unmap_start, unmap_end;
-#endif
-
- end = (unsigned long)HEAP_FREE_AREA_END (fa);
-
-#ifdef MALLOC_USE_SBRK
- /* When using sbrk, we only shrink the heap from the end. It would be
- possible to allow _both_ -- shrinking via sbrk when possible, and
- otherwise shrinking via munmap, but this results in holes in memory
- that prevent the brk from ever growing back down; since we only ever
- grow the heap via sbrk, this tends to produce a continuously growing
- brk (though the actual memory is unmapped), which could eventually run
- out of address space. Note that `sbrk(0)' shouldn't normally do a
- system call, so this test is reasonably cheap. */
- if ((void *)end != sbrk (0))
- {
- MALLOC_DEBUG (" not unmapping: 0x%lx - 0x%lx (%d bytes)\n",
- (unsigned long)HEAP_FREE_AREA_START (fa),
- (unsigned long)HEAP_FREE_AREA_END (fa),
- fa->size);
- return;
- }
-#endif
-
- start = (unsigned long)HEAP_FREE_AREA_START (fa);
-
- MALLOC_DEBUG (" unmapping: 0x%lx - 0x%lx (%ld bytes)\n",
- start, end, end - start);
-
- /* Remove FA from the heap. */
- __heap_unlink_free_area (heap, fa);
-
- if (!fa->next && !fa->prev)
- /* We want to keep the heap from losing all memory, so reserve a bit.
- This test is only a heuristic -- the existence of another free area,
- even if it's smaller than MALLOC_MIN_SIZE, will cause us not to
- reserve anything. */
- {
- /* Put the reserved memory back in the heap; we assume that
- MALLOC_UNMAP_THRESHOLD is greater than MALLOC_MIN_SIZE, so we use
- the latter unconditionally here. */
- __heap_free (heap, (void *)start, MALLOC_MIN_SIZE);
- start += MALLOC_MIN_SIZE;
- }
-
-#ifdef MALLOC_USE_SBRK
-
- sbrk (start - end);
-
-#else /* !MALLOC_USE_SBRK */
-
- /* START/END may not be page-aligned, so we have to page-align them, and
- return any left-over bits on the end to the heap. */
- unmap_start = MALLOC_ROUND_UP_TO_PAGE_SIZE (start);
- unmap_end = MALLOC_ROUND_DOWN_TO_PAGE_SIZE (end);
-
- /* We have to be careful that any left-over bits are large enough to
- return. Note that we _don't check_ to make sure there's room to
- grow/shrink the start/end by another page, we just assume that the
- unmap threshold is high enough so that this is always safe (i.e., it
- should probably be at least 3 pages). */
- if (unmap_start > start)
- {
- if (unmap_start - start < HEAP_MIN_FREE_AREA_SIZE)
- unmap_start += MALLOC_PAGE_SIZE;
- __heap_free (heap, (void *)start, unmap_start - start);
- }
- if (end > unmap_end)
- {
- if (end - unmap_end < HEAP_MIN_FREE_AREA_SIZE)
- unmap_end -= MALLOC_PAGE_SIZE;
- __heap_free (heap, (void *)unmap_end, end - unmap_end);
- }
-
- if (unmap_end > unmap_start)
- munmap ((void *)unmap_start, unmap_end - unmap_start);
-
-#endif /* MALLOC_USE_SBRK */
-}
-
void
free (void *mem)
@@ -120,12 +33,112 @@ free (void *mem)
MALLOC_DEBUG ("free: 0x%lx (base = 0x%lx, total_size = %d)\n",
(long)mem + MALLOC_ALIGNMENT, (long)mem, size);
+ __malloc_lock ();
+
fa = __heap_free (&__malloc_heap, mem, size);
/* Now we check to see if FA has grown big enough that it should be
unmapped. */
- if (HEAP_FREE_AREA_SIZE (fa) >= MALLOC_UNMAP_THRESHOLD)
- /* Get rid of it. */
- unmap_free_area (&__malloc_heap, fa);
+ if (HEAP_FREE_AREA_SIZE (fa) < MALLOC_UNMAP_THRESHOLD)
+ /* Nothing left to do, just release the lock. */
+ __malloc_unlock ();
+ else
+ /* Try to unmap FA. */
+ {
+ unsigned long start, end;
+#ifndef MALLOC_USE_SBRK
+ unsigned long unmap_start, unmap_end;
+#endif
+
+ end = (unsigned long)HEAP_FREE_AREA_END (fa);
+
+#ifdef MALLOC_USE_SBRK
+ /* Get the sbrk lock so that the two possible calls to sbrk below
+ are guaranteed to be contiguous. */
+ __malloc_lock_sbrk ();
+ /* When using sbrk, we only shrink the heap from the end. It would
+ be possible to allow _both_ -- shrinking via sbrk when possible,
+ and otherwise shrinking via munmap, but this results in holes in
+ memory that prevent the brk from ever growing back down; since
+ we only ever grow the heap via sbrk, this tends to produce a
+ continuously growing brk (though the actual memory is unmapped),
+ which could eventually run out of address space. Note that
+ `sbrk(0)' shouldn't normally do a system call, so this test is
+ reasonably cheap. */
+ if ((void *)end != sbrk (0))
+ {
+ MALLOC_DEBUG (" not unmapping: 0x%lx - 0x%lx (%d bytes)\n",
+ (unsigned long)HEAP_FREE_AREA_START (fa),
+ (unsigned long)HEAP_FREE_AREA_END (fa),
+ fa->size);
+ __malloc_unlock_sbrk ();
+ __malloc_unlock ();
+ return;
+ }
+#endif
+
+ start = (unsigned long)HEAP_FREE_AREA_START (fa);
+
+ MALLOC_DEBUG (" unmapping: 0x%lx - 0x%lx (%ld bytes)\n",
+ start, end, end - start);
+
+ /* Remove FA from the heap. */
+ __heap_unlink_free_area (&__malloc_heap, fa);
+
+ if (!fa->next && !fa->prev)
+ /* We want to keep the heap from losing all memory, so reserve
+ a bit. This test is only a heuristic -- the existence of
+ another free area, even if it's smaller than
+ MALLOC_MIN_SIZE, will cause us not to reserve anything. */
+ {
+ /* Put the reserved memory back in the heap; we assume that
+ MALLOC_UNMAP_THRESHOLD is greater than MALLOC_MIN_SIZE, so
+ we use the latter unconditionally here. */
+ __heap_free (&__malloc_heap, (void *)start, MALLOC_MIN_SIZE);
+ start += MALLOC_MIN_SIZE;
+ }
+
+#ifdef MALLOC_USE_SBRK
+
+ /* Release the main lock; we're still holding the sbrk lock. */
+ __malloc_unlock ();
+ /* Lower the brk. */
+ sbrk (start - end);
+ /* Release the sbrk lock too; now we hold no locks. */
+ __malloc_unlock_sbrk ();
+
+#else /* !MALLOC_USE_SBRK */
+
+ /* START/END may not be page-aligned, so we have to page-align them,
+ and return any left-over bits on the end to the heap. */
+ unmap_start = MALLOC_ROUND_UP_TO_PAGE_SIZE (start);
+ unmap_end = MALLOC_ROUND_DOWN_TO_PAGE_SIZE (end);
+
+ /* We have to be careful that any left-over bits are large enough to
+ return. Note that we _don't check_ to make sure there's room to
+ grow/shrink the start/end by another page, we just assume that
+ the unmap threshold is high enough so that this is always safe
+ (i.e., it should probably be at least 3 pages). */
+ if (unmap_start > start)
+ {
+ if (unmap_start - start < HEAP_MIN_FREE_AREA_SIZE)
+ unmap_start += MALLOC_PAGE_SIZE;
+ __heap_free (&__malloc_heap, (void *)start, unmap_start - start);
+ }
+ if (end > unmap_end)
+ {
+ if (end - unmap_end < HEAP_MIN_FREE_AREA_SIZE)
+ unmap_end -= MALLOC_PAGE_SIZE;
+ __heap_free (&__malloc_heap, (void *)unmap_end, end - unmap_end);
+ }
+
+ /* Release the malloc lock before we do the system call. */
+ __malloc_unlock ();
+
+ if (unmap_end > unmap_start)
+ munmap ((void *)unmap_start, unmap_end - unmap_start);
+
+#endif /* MALLOC_USE_SBRK */
+ }
}
}
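
The trimming logic in the munmap branch above is worth isolating: only the
page-aligned interior of a free area can be returned to the system, and a
sliver too small to stand alone as a free area costs a whole page.  A
minimal sketch, with PAGE_SIZE and MIN_FREE standing in for
MALLOC_PAGE_SIZE and HEAP_MIN_FREE_AREA_SIZE (illustrative, not the
patch's code):

    #include <sys/mman.h>

    #define PAGE_SIZE 4096UL  /* stand-in for MALLOC_PAGE_SIZE */
    #define MIN_FREE    16UL  /* stand-in for HEAP_MIN_FREE_AREA_SIZE */

    /* Unmap the page-aligned interior of [start, end); the unaligned
       slivers at either end go back to the heap (omitted here). */
    static void unmap_interior (unsigned long start, unsigned long end)
    {
      unsigned long unmap_start = (start + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
      unsigned long unmap_end = end & ~(PAGE_SIZE - 1);

      /* A sliver too small to stand alone as a free area can't be
         given back, so sacrifice a whole page instead. */
      if (unmap_start > start && unmap_start - start < MIN_FREE)
        unmap_start += PAGE_SIZE;
      if (end > unmap_end && end - unmap_end < MIN_FREE)
        unmap_end -= PAGE_SIZE;

      if (unmap_end > unmap_start)
        munmap ((void *)unmap_start, unmap_end - unmap_start);
    }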
diff --git a/libc/stdlib/malloc/heap.h b/libc/stdlib/malloc/heap.h
index 5207afe8e..445b012ce 100644
--- a/libc/stdlib/malloc/heap.h
+++ b/libc/stdlib/malloc/heap.h
@@ -14,21 +14,6 @@
#include <features.h>
-#ifdef __UCLIBC_HAS_THREADS__
-#include <pthread.h>
-typedef pthread_mutex_t heap_mutex_t;
-# define HEAP_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
-# define __heap_lock(heap) pthread_mutex_lock (&(heap)->lock)
-# define __heap_unlock(heap) pthread_mutex_unlock (&(heap)->lock);
-#else
-/* Without threads, mutex operations are a nop. */
-typedef int heap_mutex_t;
-# define HEAP_MUTEX_INIT 0
-# define __heap_lock(heap)
-# define __heap_unlock(heap)
-#endif
-
-
/* The heap allocates in multiples of, and aligned to, HEAP_GRANULARITY.
HEAP_GRANULARITY must be a power of 2. Malloc depends on this being the
same as MALLOC_ALIGNMENT. */
@@ -41,10 +26,8 @@ struct heap
{
/* A list of memory in the heap available for allocation. */
struct heap_free_area *free_areas;
-
- heap_mutex_t lock;
};
-#define HEAP_INIT { 0, HEAP_MUTEX_INIT }
+#define HEAP_INIT { 0 }
/* A free-list area `header'. These are actually stored at the _ends_ of
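
Since the granularity comment above requires HEAP_GRANULARITY to be a power
of 2, rounding a size up to it is a single mask operation -- presumably
what HEAP_ADJUST_SIZE does, though its body isn't shown in this hunk (GRAN
is a stand-in):

    #define GRAN 8UL                                  /* must be a power of 2 */
    #define ADJUST_SIZE(sz) (((sz) + GRAN - 1) & ~(GRAN - 1))
    /* e.g. ADJUST_SIZE (13) == 16, ADJUST_SIZE (16) == 16 */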
diff --git a/libc/stdlib/malloc/heap_alloc.c b/libc/stdlib/malloc/heap_alloc.c
index d0e1d752a..8a6c7842e 100644
--- a/libc/stdlib/malloc/heap_alloc.c
+++ b/libc/stdlib/malloc/heap_alloc.c
@@ -33,8 +33,6 @@ __heap_alloc (struct heap *heap, size_t *size)
we must make sure that every allocated block can hold one. */
_size = HEAP_ADJUST_SIZE (sizeof (struct heap_free_area));
- __heap_lock (heap);
-
HEAP_DEBUG (heap, "before __heap_alloc");
/* Look for a free area that can contain _SIZE bytes. */
@@ -49,7 +47,5 @@ __heap_alloc (struct heap *heap, size_t *size)
HEAP_DEBUG (heap, "after __heap_alloc");
- __heap_unlock (heap);
-
return mem;
}
diff --git a/libc/stdlib/malloc/heap_alloc_at.c b/libc/stdlib/malloc/heap_alloc_at.c
index 4047697c5..de84e99ee 100644
--- a/libc/stdlib/malloc/heap_alloc_at.c
+++ b/libc/stdlib/malloc/heap_alloc_at.c
@@ -26,8 +26,6 @@ __heap_alloc_at (struct heap *heap, void *mem, size_t size)
size = HEAP_ADJUST_SIZE (size);
- __heap_lock (heap);
-
HEAP_DEBUG (heap, "before __heap_alloc_at");
/* Look for a free area that can contain SIZE bytes. */
@@ -45,7 +43,5 @@ __heap_alloc_at (struct heap *heap, void *mem, size_t size)
HEAP_DEBUG (heap, "after __heap_alloc_at");
- __heap_unlock (heap);
-
return alloced;
}
diff --git a/libc/stdlib/malloc/heap_free.c b/libc/stdlib/malloc/heap_free.c
index e958f920d..6084b43c3 100644
--- a/libc/stdlib/malloc/heap_free.c
+++ b/libc/stdlib/malloc/heap_free.c
@@ -23,8 +23,6 @@ __heap_free (struct heap *heap, void *mem, size_t size)
struct heap_free_area *prev_fa, *fa;
void *end = (char *)mem + size;
- __heap_lock (heap);
-
HEAP_DEBUG (heap, "before __heap_free");
/* Find an adjacent free-list entry. */
@@ -59,8 +57,8 @@ __heap_free (struct heap *heap, void *mem, size_t size)
/* See if FA can now be merged with its successor. */
if (next_fa && mem + size == HEAP_FREE_AREA_START (next_fa))
+ /* Yup; merge FA's info into NEXT_FA. */
{
- /* Yup; merge FA's info into NEXT_FA. */
fa_size += next_fa->size;
__heap_link_free_area_after (heap, next_fa, prev_fa);
fa = next_fa;
@@ -92,7 +90,5 @@ __heap_free (struct heap *heap, void *mem, size_t size)
done:
HEAP_DEBUG (heap, "after __heap_free");
- __heap_unlock (heap);
-
return fa;
}
diff --git a/libc/stdlib/malloc/malloc.c b/libc/stdlib/malloc/malloc.c
index 049a3e003..e6b9c0d66 100644
--- a/libc/stdlib/malloc/malloc.c
+++ b/libc/stdlib/malloc/malloc.c
@@ -22,6 +22,15 @@
/* The malloc heap. */
struct heap __malloc_heap = HEAP_INIT;
+#ifdef MALLOC_USE_LOCKING
+/* A lock protecting the malloc heap. */
+malloc_mutex_t __malloc_lock;
+# ifdef MALLOC_USE_SBRK
+/* A lock protecting our use of sbrk. */
+malloc_mutex_t __malloc_sbrk_lock;
+# endif /* MALLOC_USE_SBRK */
+#endif /* MALLOC_USE_LOCKING */
+
void *
malloc (size_t size)
@@ -33,6 +42,8 @@ malloc (size_t size)
/* Include extra space to record the size of the allocated block. */
size += MALLOC_ROUND_UP (sizeof (size_t), MALLOC_ALIGNMENT);
+ __malloc_lock ();
+
mem = __heap_alloc (&__malloc_heap, &size);
if (! mem)
/* We couldn't allocate from the heap, so get some more memory
@@ -40,24 +51,30 @@ malloc (size_t size)
{
/* If we're trying to allocate a block bigger than the default
MALLOC_HEAP_EXTEND_SIZE, make sure we get enough to hold it. */
+ void *block;
size_t block_size
= (size < MALLOC_HEAP_EXTEND_SIZE
? MALLOC_HEAP_EXTEND_SIZE
: MALLOC_ROUND_UP_TO_PAGE_SIZE (size));
+
+#ifdef MALLOC_USE_SBRK
+ /* Get the sbrk lock while we've still got the main lock. */
+ __malloc_lock_sbrk ();
+#endif
+
+ /* Don't hold the main lock during the syscall, so that small
+ allocations in a different thread may succeed while we're
+ blocked. */
+ __malloc_unlock ();
+
/* Allocate the new heap block. */
#ifdef MALLOC_USE_SBRK
+
/* Use sbrk if we can, as it's faster than mmap, and guarantees
contiguous allocation. */
- void *block = sbrk (block_size);
-#else
- /* Otherwise, use mmap. */
- void *block = mmap (0, block_size, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS, 0, 0);
-#endif
-
+ block = sbrk (block_size);
if (block != (void *)-1)
{
-#ifdef MALLOC_USE_SBRK
/* Because sbrk can return results of arbitrary
alignment, align the result to a MALLOC_ALIGNMENT boundary. */
long aligned_block = MALLOC_ROUND_UP ((long)block, MALLOC_ALIGNMENT);
@@ -71,8 +88,22 @@ malloc (size_t size)
sbrk (aligned_block - (long)block);
block = (void *)aligned_block;
}
+ }
+ __malloc_unlock_sbrk ();
+
+#else /* !MALLOC_USE_SBRK */
+
+ /* Otherwise, use mmap. */
+ block = mmap (0, block_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, 0, 0);
+
#endif /* MALLOC_USE_SBRK */
+ /* Get back the main lock. */
+ __malloc_lock ();
+
+ if (block != (void *)-1)
+ {
MALLOC_DEBUG (" adding memory: 0x%lx - 0x%lx (%d bytes)\n",
(long)block, (long)block + block_size, block_size);
@@ -84,6 +115,8 @@ malloc (size_t size)
}
}
+ __malloc_unlock ();
+
if (mem)
/* Record the size of this block just before the returned address. */
{
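
The size-recording step mentioned in the last visible lines of this hunk is
the usual header-prefix trick: the block's total size is stored just below
the pointer handed back to the caller, and free/realloc step back over it.
A sketch under that assumption (ALIGN stands in for MALLOC_ALIGNMENT; not
the patch's macros):

    #include <stddef.h>

    #define ALIGN (sizeof (size_t))   /* stand-in for MALLOC_ALIGNMENT */

    /* Stash SIZE just below the pointer returned to the user... */
    static void *record_size (void *base, size_t size)
    {
      *(size_t *)base = size;
      return (char *)base + ALIGN;
    }

    /* ...and recover it later by stepping back over the header. */
    static size_t recover_size (void *mem)
    {
      return *(size_t *)((char *)mem - ALIGN);
    }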
diff --git a/libc/stdlib/malloc/malloc.h b/libc/stdlib/malloc/malloc.h
index 0cab1dba2..f01484368 100644
--- a/libc/stdlib/malloc/malloc.h
+++ b/libc/stdlib/malloc/malloc.h
@@ -38,10 +38,46 @@
mmap/munmap, and guarantees contiguous allocation, but is also less
flexible, and causes the heap to only be shrinkable from the end. */
#ifdef __UCLIBC_HAS_MMU__
-#define MALLOC_USE_SBRK
+# define MALLOC_USE_SBRK
#endif
+#ifdef __UCLIBC_HAS_THREADS__
+
+# include <pthread.h>
+
+# define MALLOC_USE_LOCKING
+
+typedef pthread_mutex_t malloc_mutex_t;
+# define MALLOC_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
+
+/* The main malloc lock. This must be held while accessing __malloc_heap,
+ and in order to acquire __malloc_sbrk_lock. */
+extern malloc_mutex_t __malloc_lock;
+# define __malloc_lock() pthread_mutex_lock (&__malloc_lock)
+# define __malloc_unlock() pthread_mutex_unlock (&__malloc_lock)
+
+# ifdef MALLOC_USE_SBRK
+/* This lock is used to serialize uses of the `sbrk' function (in both
+ malloc and free, sbrk may be used several times in succession, and
+ things will break if these multiple calls are interleaved with another
+ thread's use of sbrk!). */
+extern malloc_mutex_t __malloc_sbrk_lock;
+# define __malloc_lock_sbrk() pthread_mutex_lock (&__malloc_sbrk_lock)
+# define __malloc_unlock_sbrk() pthread_mutex_unlock (&__malloc_sbrk_lock)
+# endif /* MALLOC_USE_SBRK */
+
+#else /* !__UCLIBC_HAS_THREADS__ */
+
+/* Without threads, mutex operations are a nop. */
+# define __malloc_lock() (void)0
+# define __malloc_unlock() (void)0
+# define __malloc_lock_sbrk() (void)0
+# define __malloc_unlock_sbrk() (void)0
+
+#endif /* __UCLIBC_HAS_THREADS__ */
+
+
/* Change this to `#if 1' to cause malloc to emit debugging info to stderr. */
#if 0
#include <stdio.h>
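
The interleaving hazard that __malloc_sbrk_lock guards against shows up
concretely in malloc's grow path, which may call sbrk twice in a row --
once to get a block, once to round the break up to an alignment boundary.
A sketch of that paired-call pattern (illustrative; ALIGN must be a power
of 2):

    #include <stddef.h>
    #include <unistd.h>

    static void *aligned_sbrk (size_t size, long align)
    {
      void *block = sbrk (size);
      if (block != (void *)-1)
        {
          long aligned = ((long)block + align - 1) & ~(align - 1);
          if (aligned != (long)block)
            /* Only safe if no other thread moved the break in
               between -- exactly what the sbrk lock guarantees. */
            sbrk (aligned - (long)block);
          block = (void *)aligned;
        }
      return block;
    }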
diff --git a/libc/stdlib/malloc/realloc.c b/libc/stdlib/malloc/realloc.c
index 7d8227aff..7a89712e3 100644
--- a/libc/stdlib/malloc/realloc.c
+++ b/libc/stdlib/malloc/realloc.c
@@ -39,7 +39,10 @@ realloc (void *mem, size_t new_size)
size_t ext_size = new_size - size;
void *ext_addr = (char *)base_mem + ext_size;
+ __malloc_lock ();
ext_size = __heap_alloc_at (&__malloc_heap, ext_addr, ext_size);
+ __malloc_unlock ();
+
if (! ext_size)
/* Our attempts to extend MEM in place failed, just
allocate-and-copy. */
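
Seen from a distance, the realloc strategy above is try-in-place-then-copy.
A hedged sketch of that shape, where try_extend_at is a hypothetical
stand-in for the __heap_alloc_at call, not the library's API:

    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical helper standing in for __heap_alloc_at: try to
       claim LEN bytes starting exactly at ADDR; nonzero on success. */
    extern size_t try_extend_at (void *addr, size_t len);

    static void *grow_block (void *mem, size_t old_size, size_t new_size)
    {
      if (try_extend_at ((char *)mem + old_size, new_size - old_size))
        return mem;                        /* extended in place, no copy */

      void *new_mem = malloc (new_size);   /* fall back: allocate-and-copy */
      if (new_mem)
        {
          memcpy (new_mem, mem, old_size);
          free (mem);
        }
      return new_mem;
    }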