author     Miles Bader <miles@lsi.nec.co.jp>   2002-07-23 06:50:40 +0000
committer  Miles Bader <miles@lsi.nec.co.jp>   2002-07-23 06:50:40 +0000
commit     83cef9f931bcd2030f42079c332525e1e73ab6aa
tree       5867067ac5387998a301f69a59ca6d78b26680d5 /libc/stdlib/malloc/malloc.c
parent     a9752043dd652d0fb4addf947b76e57c588f430c
* Automatically try to unmap heap free-areas when they get very big.
* Instead of using mmap/munmap directly for large allocations, just use
  the heap for everything (this is reasonable now that heap memory can be
  unmapped).
* Use sbrk instead of mmap/munmap on systems with an MMU.
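
The sbrk path relies on a small alignment fixup, since sbrk can hand back an
address with arbitrary alignment. Below is a minimal, self-contained sketch of
that fixup, not code from the patch itself: ALIGNMENT stands in for
MALLOC_ALIGNMENT, and get_aligned_block() is a hypothetical helper mirroring
the MALLOC_USE_SBRK branch added in the diff.

#include <stdint.h>
#include <unistd.h>

#define ALIGNMENT 8   /* stand-in for MALLOC_ALIGNMENT */

/* Extend the break by BLOCK_SIZE bytes and return an ALIGNMENT-aligned
   block, bumping the break a little further if sbrk returned a
   misaligned address (as the patch does on its first heap extension).  */
static void *
get_aligned_block (size_t block_size)
{
  void *block = sbrk (block_size);
  uintptr_t addr, aligned;

  if (block == (void *) -1)
    return 0;

  addr = (uintptr_t) block;
  aligned = (addr + ALIGNMENT - 1) & ~(uintptr_t) (ALIGNMENT - 1);
  if (aligned != addr)
    {
      /* Move the break forward by the misalignment; subsequent calls
         then start out aligned already.  */
      if (sbrk (aligned - addr) == (void *) -1)
        return 0;
      block = (void *) aligned;
    }
  return block;
}
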
Diffstat (limited to 'libc/stdlib/malloc/malloc.c')
-rw-r--r--  libc/stdlib/malloc/malloc.c  110
1 file changed, 50 insertions(+), 60 deletions(-)
diff --git a/libc/stdlib/malloc/malloc.c b/libc/stdlib/malloc/malloc.c
index 32d56c153..0e84bf646 100644
--- a/libc/stdlib/malloc/malloc.c
+++ b/libc/stdlib/malloc/malloc.c
@@ -12,25 +12,14 @@
*/
#include <stdlib.h>
+#include <unistd.h>
#include <sys/mman.h>
#include "malloc.h"
#include "heap.h"
-/* When we give memory to the heap, start this many bytes after the
- beginning of the mmaped block. This is because we must ensure that
- malloc return values are aligned to MALLOC_ALIGNMENT, but since we need
- to use one word _before_ the beginning of that, we actually want the heap
- to return values that are MALLOC_ALIGNMENT aligned - sizeof (size_t).
- Since the heap always allocates in multiples of HEAP_GRANULARITY, we can
- do this by (1) ensuring that HEAP_GRANULARITY is a multiple of
- MALLOC_ALIGNMENT, and (2) making sure that the heap's free areas start
- sizeof(size_t) bytes before our required alignment. */
-#define MALLOC_HEAP_BLOCK_SHIM (MALLOC_ALIGNMENT - sizeof (size_t))
-
-
-/* The heap used for small allocations. */
+/* The malloc heap. */
struct heap __malloc_heap = HEAP_INIT;
@@ -40,56 +29,57 @@ void *malloc (size_t size)
MALLOC_DEBUG ("malloc: %d bytes\n", size);
- /* Include an extra word to record the size of the allocated block. */
- size += sizeof (size_t);
-
- if (size >= MALLOC_MMAP_THRESHOLD)
- /* Use mmap for large allocations. */
- {
- /* Make sure we request enough memory to align the result correctly,
- and that SIZE reflects that mmap hands back whole pages. */
- size += MALLOC_ROUND_UP_TO_PAGE_SIZE (MALLOC_ALIGNMENT - sizeof(size_t));
+ /* Include extra space to record the size of the allocated block. */
+ size += MALLOC_ROUND_UP (sizeof (size_t), MALLOC_ALIGNMENT);
- mem = mmap (0, size, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS, 0, 0);
- if (mem == MAP_FAILED)
- return 0;
- }
- else
- /* Use the heap for small allocations. */
+ mem = __heap_alloc (&__malloc_heap, &size);
+ if (! mem)
+ /* We couldn't allocate from the heap, so get some more memory
+ from the system, add it to the heap, and try again. */
{
- mem = __heap_alloc (&__malloc_heap, &size);
-
- if (! mem)
- /* We couldn't allocate from the heap, so get some more memory
- from the system, add it to the heap, and try again. */
+ /* If we're trying to allocate a block bigger than the default
+ MALLOC_HEAP_EXTEND_SIZE, make sure we get enough to hold it. */
+ size_t block_size
+ = (size < MALLOC_HEAP_EXTEND_SIZE
+ ? MALLOC_HEAP_EXTEND_SIZE
+ : MALLOC_ROUND_UP_TO_PAGE_SIZE (size));
+ /* Allocate the new heap block. */
+#ifdef MALLOC_USE_SBRK
+ /* Use sbrk if we can, as it's faster than mmap, and guarantees
+ contiguous allocation. */
+ void *block = sbrk (block_size);
+#else
+ /* Otherwise, use mmap. */
+ void *block = mmap (0, block_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, 0, 0);
+#endif
+
+ if (block != (void *)-1)
{
- /* If we're trying to allocate a block bigger than the default
- MALLOC_HEAP_EXTEND_SIZE, make sure we get enough to hold it. */
- size_t block_size = (size < MALLOC_HEAP_EXTEND_SIZE
- ? MALLOC_HEAP_EXTEND_SIZE
- : MALLOC_ROUND_UP_TO_PAGE_SIZE (size));
- /* Allocate the new heap block. */
- void *block = mmap (0, block_size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS, 0, 0);
-
- if (block != MAP_FAILED)
+#ifdef MALLOC_USE_SBRK
+ /* Because sbrk can return results of arbitrary
+ alignment, align the result to a MALLOC_ALIGNMENT boundary. */
+ long aligned_block = MALLOC_ROUND_UP ((long)block, MALLOC_ALIGNMENT);
+ if (block != (void *)aligned_block)
+ /* Have to adjust. We should only have to actually do this
+ the first time (after which we will have aligned the brk
+ correctly). */
{
- /* Put BLOCK into the heap. We first try to append BLOCK to
- an existing free area, which is more efficient because it
- doesn't require using a `shim' at the beginning (which
- would prevent merging free-areas); since mmap often returns
- contiguous areas, this is worth it. */
- if (! __heap_append_free (&__malloc_heap, block, block_size))
- /* Couldn't append, just add BLOCK as a new free-area. */
- __heap_free (&__malloc_heap,
- block + MALLOC_HEAP_BLOCK_SHIM,
- block_size - MALLOC_HEAP_BLOCK_SHIM);
-
- /* Try again to allocate. */
- mem = __heap_alloc (&__malloc_heap, &size);
+ /* Move the brk to reflect the alignment; our next allocation
+ should start on exactly the right alignment. */
+ sbrk (aligned_block - (long)block);
+ block = (void *)aligned_block;
}
+#endif /* MALLOC_USE_SBRK */
+
+ MALLOC_DEBUG (" adding memory: 0x%lx - 0x%lx (%d bytes)\n",
+ (long)block, (long)block + block_size, block_size);
+
+ /* Put BLOCK into the heap. */
+ __heap_free (&__malloc_heap, block, block_size);
+
+ /* Try again to allocate. */
+ mem = __heap_alloc (&__malloc_heap, &size);
}
}
@@ -97,10 +87,10 @@ void *malloc (size_t size)
/* Record the size of this block just before the returned address. */
{
*(size_t *)mem = size;
- mem = (size_t *)mem + 1;
+ mem += MALLOC_ALIGNMENT;
MALLOC_DEBUG (" malloc: returning 0x%lx (base:0x%lx, total_size:%d)\n",
- (long)mem, (long)mem - sizeof (size_t), size);
+ (long)mem, (long)mem - MALLOC_ALIGNMENT, size);
}
return mem;
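
For reference, the bookkeeping that the final hunk switches to can be pictured
as follows. This is an illustrative sketch rather than patch code: HEADER
stands in for MALLOC_ALIGNMENT, and block_size_of() is a hypothetical helper
showing how the matching free()/realloc() would presumably recover the size
(free itself is not part of this diff).

#include <stddef.h>

#define HEADER 8   /* stand-in for MALLOC_ALIGNMENT */

/* Store the total block size in the first word of BASE and hand the
   caller a pointer HEADER bytes further on, so the result stays
   aligned and the size remains recoverable.  */
static void *
finish_alloc (void *base, size_t total_size)
{
  *(size_t *) base = total_size;
  return (char *) base + HEADER;
}

/* Hypothetical counterpart: recover the size from a pointer previously
   returned by finish_alloc.  */
static size_t
block_size_of (void *mem)
{
  return *(size_t *) ((char *) mem - HEADER);
}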