summaryrefslogtreecommitdiff
path: root/libpthread/linuxthreads.old/sysdeps/x86_64
AgeCommit message (Collapse)Author
2008-04-24  fixup asm. No object-code changes  (Bernhard Reutner-Fischer)
2008-01-06  use the __extern_always_inline define from cdefs.h instead of duplicating gcc version checking in every pt-machine.h header ... while __extern_always_inline should work fine, i think what is intended is __extern_inline ... should double check later  (Mike Frysinger)
2008-01-05  fixup gnu_inline vs. C99 inline; add missing header guards while at it  (Bernhard Reutner-Fischer)
2007-01-29  since these functions get exported, namespace the arguments  (Mike Frysinger)
2005-11-15  rename current stable linuxthreads to linuxthreads.old to prepare for import of latest glibc version  (Mike Frysinger)
_FA (initial_fa);
+struct heap_free_area *__malloc_heap = HEAP_INIT_WITH_FA (initial_fa);
+malloc_mutex_t __malloc_heap_lock = PTHREAD_MUTEX_INITIALIZER;
#if defined(MALLOC_USE_LOCKING) && defined(MALLOC_USE_SBRK)
/* A lock protecting our use of sbrk. */
@@ -43,12 +44,13 @@ struct malloc_mmb *__malloc_mmapped_blocks = 0;
them from the main heap, but that tends to cause heap fragmentation in
annoying ways. */
HEAP_DECLARE_STATIC_FREE_AREA (initial_mmb_fa, 48); /* enough for 3 mmbs */
-struct heap __malloc_mmb_heap = HEAP_INIT_WITH_FA (initial_mmb_fa);
+struct heap_free_area *__malloc_mmb_heap = HEAP_INIT_WITH_FA (initial_mmb_fa);
+malloc_mutex_t __malloc_mmb_heap_lock = PTHREAD_MUTEX_INITIALIZER;
#endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
static void *
-malloc_from_heap (size_t size, struct heap *heap)
+malloc_from_heap (size_t size, struct heap_free_area *heap, malloc_mutex_t *heap_lock)
{
void *mem;
@@ -57,12 +59,12 @@ malloc_from_heap (size_t size, struct heap *heap)
/* Include extra space to record the size of the allocated block. */
size += MALLOC_HEADER_SIZE;
- __heap_lock (heap);
+ __pthread_mutex_lock (heap_lock);
/* First try to get memory that's already in our heap. */
mem = __heap_alloc (heap, &size);
- __heap_unlock (heap);
+ __pthread_mutex_unlock (heap_lock);
if (unlikely (! mem))
/* We couldn't allocate from the heap, so grab some more
@@ -126,7 +128,7 @@ malloc_from_heap (size_t size, struct heap *heap)
(long)block, (long)block + block_size, block_size);
/* Get back the heap lock. */
- __heap_lock (heap);
+ __pthread_mutex_lock (heap_lock);
/* Put BLOCK into the heap. */
__heap_free (heap, block, block_size);
@@ -136,7 +138,7 @@ malloc_from_heap (size_t size, struct heap *heap)
/* Try again to allocate. */
mem = __heap_alloc (heap, &size);
- __heap_unlock (heap);
+ __pthread_mutex_unlock (heap_lock);
#if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
/* Insert a record of BLOCK in sorted order into the
@@ -148,7 +150,7 @@ malloc_from_heap (size_t size, struct heap *heap)
if (block < mmb->mem)
break;
- new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap);
+ new_mmb = malloc_from_heap (sizeof *new_mmb, __malloc_mmb_heap, &__malloc_mmb_heap_lock);
new_mmb->next = mmb;
new_mmb->mem = block;
new_mmb->size = block_size;
@@ -207,7 +209,7 @@ malloc (size_t size)
if (unlikely(((unsigned long)size > (unsigned long)(MALLOC_HEADER_SIZE*-2))))
goto oom;
- mem = malloc_from_heap (size, &__malloc_heap);
+ mem = malloc_from_heap (size, __malloc_heap, &__malloc_heap_lock);
if (unlikely (!mem))
{
oom: