author    Khem Raj <kraj@mvista.com>    2008-10-11 08:52:58 +0000
committer Khem Raj <kraj@mvista.com>    2008-10-11 08:52:58 +0000
commit    6494060312de389feb65ad32bb411fcc64e821b7 (patch)
tree      b2c735bba6b667b8cf056a858dc216c14590c138
parent    47b2dbaaac9757496eb9d419e1912250354d30d1 (diff)
Fix bug 4994: hangs on read(). I have tested the patch extensively on ARM/LT.old.
Thank you, Chase Douglas, for reporting it and for the patch.
-rw-r--r--  libc/stdlib/malloc-simple/alloc.c                              |  2
-rw-r--r--  libc/stdlib/malloc/free.c                                      | 22
-rw-r--r--  libc/stdlib/malloc/heap.h                                      | 56
-rw-r--r--  libc/stdlib/malloc/heap_alloc.c                                |  4
-rw-r--r--  libc/stdlib/malloc/heap_alloc_at.c                             |  4
-rw-r--r--  libc/stdlib/malloc/heap_free.c                                 |  4
-rw-r--r--  libc/stdlib/malloc/malloc.c                                    | 20
-rw-r--r--  libc/stdlib/malloc/malloc.h                                    |  6
-rw-r--r--  libc/stdlib/malloc/memalign.c                                  |  2
-rw-r--r--  libc/stdlib/malloc/realloc.c                                   | 12
-rw-r--r--  libpthread/linuxthreads.old/ptfork.c                           | 50
-rw-r--r--  libpthread/linuxthreads.old/sysdeps/pthread/bits/libc-lock.h  | 38
12 files changed, 137 insertions(+), 83 deletions(-)
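
The common thread in this patch: the mutex that used to live inside struct heap becomes a standalone lock that the fork path can acquire. Before the change, a thread calling fork() while another thread held the malloc lock left the child with a heap lock that would never be released, which is how the bug 4994 hang manifests. Below is a minimal, compilable sketch of that lock-across-fork discipline using the portable pthread_atfork() hook; the names are illustrative stand-ins rather than the uClibc symbols (the patch does the equivalent work inline in __fork(), see the ptfork.c hunks further down).

#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>

/* Stand-in for __malloc_heap_lock; purely illustrative. */
static pthread_mutex_t heap_lock = PTHREAD_MUTEX_INITIALIZER;

static void before_fork(void) { pthread_mutex_lock(&heap_lock); }   /* own the lock across fork() */
static void in_parent(void)   { pthread_mutex_unlock(&heap_lock); } /* parent just releases it */

static void in_child(void)
{
    /* Only the forking thread survives in the child, so the lock is reset
     * rather than unlocked: its former owner does not exist here. */
    pthread_mutex_t fresh = PTHREAD_MUTEX_INITIALIZER;
    heap_lock = fresh;
}

int main(void)
{
    pthread_atfork(before_fork, in_parent, in_child);
    if (fork() >= 0)
        free(malloc(16));   /* allocator usable in parent and child alike */
    return 0;
}
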
diff --git a/libc/stdlib/malloc-simple/alloc.c b/libc/stdlib/malloc-simple/alloc.c
index 13d4166a7..b6d1ec676 100644
--- a/libc/stdlib/malloc-simple/alloc.c
+++ b/libc/stdlib/malloc-simple/alloc.c
@@ -116,7 +116,7 @@ void free(void *ptr)
#ifdef L_memalign
#include <bits/uClibc_mutex.h>
-__UCLIBC_MUTEX_STATIC(__malloc_lock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
+__UCLIBC_MUTEX_INIT(__malloc_lock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
#define __MALLOC_LOCK __UCLIBC_MUTEX_LOCK(__malloc_lock)
#define __MALLOC_UNLOCK __UCLIBC_MUTEX_UNLOCK(__malloc_lock)
diff --git a/libc/stdlib/malloc/free.c b/libc/stdlib/malloc/free.c
index da395331b..fd29690ad 100644
--- a/libc/stdlib/malloc/free.c
+++ b/libc/stdlib/malloc/free.c
@@ -22,7 +22,7 @@ libc_hidden_proto(sbrk)
#include "heap.h"
static void
-free_to_heap (void *mem, struct heap *heap)
+free_to_heap (void *mem, struct heap_free_area *heap, malloc_mutex_t *heap_lock)
{
size_t size;
struct heap_free_area *fa;
@@ -39,7 +39,7 @@ free_to_heap (void *mem, struct heap *heap)
size = MALLOC_SIZE (mem);
mem = MALLOC_BASE (mem);
- __heap_lock (heap);
+ __pthread_mutex_lock (heap_lock);
/* Put MEM back in the heap, and get the free-area it was placed in. */
fa = __heap_free (heap, mem, size);
@@ -48,7 +48,7 @@ free_to_heap (void *mem, struct heap *heap)
unmapped. */
if (HEAP_FREE_AREA_SIZE (fa) < MALLOC_UNMAP_THRESHOLD)
/* Nope, nothing left to do, just release the lock. */
- __heap_unlock (heap);
+ __pthread_mutex_unlock (heap_lock);
else
/* Yup, try to unmap FA. */
{
@@ -81,7 +81,7 @@ free_to_heap (void *mem, struct heap *heap)
MALLOC_DEBUG (-1, "not unmapping: 0x%lx - 0x%lx (%ld bytes)",
start, end, end - start);
__malloc_unlock_sbrk ();
- __heap_unlock (heap);
+ __pthread_mutex_unlock (heap_lock);
return;
}
#endif
@@ -108,7 +108,7 @@ free_to_heap (void *mem, struct heap *heap)
#ifdef MALLOC_USE_SBRK
/* Release the heap lock; we're still holding the sbrk lock. */
- __heap_unlock (heap);
+ __pthread_mutex_unlock (heap_lock);
/* Lower the brk. */
sbrk (start - end);
/* Release the sbrk lock too; now we hold no locks. */
@@ -172,15 +172,15 @@ free_to_heap (void *mem, struct heap *heap)
/* We have to unlock the heap before we recurse to free the mmb
descriptor, because we might be unmapping from the mmb
heap. */
- __heap_unlock (heap);
+ __pthread_mutex_unlock (heap_lock);
/* Release the descriptor block we used. */
- free_to_heap (mmb, &__malloc_mmb_heap);
+ free_to_heap (mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
/* Do the actual munmap. */
munmap ((void *)mmb_start, mmb_end - mmb_start);
- __heap_lock (heap);
+ __pthread_mutex_lock (heap_lock);
# ifdef __UCLIBC_HAS_THREADS__
/* In a multi-threaded program, it's possible that PREV_MMB has
@@ -213,7 +213,7 @@ free_to_heap (void *mem, struct heap *heap)
}
/* Finally release the lock for good. */
- __heap_unlock (heap);
+ __pthread_mutex_unlock (heap_lock);
MALLOC_MMB_DEBUG_INDENT (-1);
@@ -243,7 +243,7 @@ free_to_heap (void *mem, struct heap *heap)
}
/* Release the heap lock before we do the system call. */
- __heap_unlock (heap);
+ __pthread_mutex_unlock (heap_lock);
if (unmap_end > unmap_start)
/* Finally, actually unmap the memory. */
@@ -260,5 +260,5 @@ free_to_heap (void *mem, struct heap *heap)
void
free (void *mem)
{
- free_to_heap (mem, &__malloc_heap);
+ free_to_heap (mem, __malloc_heap, &__malloc_heap_lock);
}
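
free_to_heap() now receives the heap's free-area list and the mutex guarding it as two separate arguments, so one routine serves both the main heap and the mmb bookkeeping heap, each paired with its own lock at the call site. A rough, hypothetical sketch of that calling convention (the type and helper names below are invented for illustration, not taken from the patch):

#include <pthread.h>
#include <stddef.h>

struct free_area { struct free_area *next, *prev; size_t size; };

/* Hypothetical analogue of free_to_heap(): list head and lock travel
 * separately, so the caller chooses which pair belongs together. */
static void return_to_heap(void *mem, size_t size,
                           struct free_area *heap, pthread_mutex_t *heap_lock)
{
    pthread_mutex_lock(heap_lock);
    /* ... splice MEM back into HEAP's free list, as __heap_free() does ... */
    (void)mem; (void)size; (void)heap;
    pthread_mutex_unlock(heap_lock);
}

With that shape, free(mem) reduces to a single call with the main heap's pair, which is what the new free_to_heap (mem, __malloc_heap, &__malloc_heap_lock) call above expresses.
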
diff --git a/libc/stdlib/malloc/heap.h b/libc/stdlib/malloc/heap.h
index 6505cd223..8b05cded1 100644
--- a/libc/stdlib/malloc/heap.h
+++ b/libc/stdlib/malloc/heap.h
@@ -29,32 +29,12 @@
#define HEAP_GRANULARITY (__alignof__ (HEAP_GRANULARITY_TYPE))
-/* A heap is a collection of memory blocks, from which smaller blocks
- of memory can be allocated. */
-struct heap
-{
- /* A list of memory in the heap available for allocation. */
- struct heap_free_area *free_areas;
-
-#ifdef HEAP_USE_LOCKING
- /* A lock that can be used by callers to control access to the heap.
- The heap code _does not_ use this lock, it's merely here for the
- convenience of users! */
- pthread_mutex_t lock;
-#endif
-};
-
/* The HEAP_INIT macro can be used as a static initializer for a heap
variable. The HEAP_INIT_WITH_FA variant is used to initialize a heap
with an initial static free-area; its argument FA should be declared
using HEAP_DECLARE_STATIC_FREE_AREA. */
-#ifdef HEAP_USE_LOCKING
-# define HEAP_INIT { 0, PTHREAD_MUTEX_INITIALIZER }
-# define HEAP_INIT_WITH_FA(fa) { &fa._fa, PTHREAD_MUTEX_INITIALIZER }
-#else
-# define HEAP_INIT { 0 }
-# define HEAP_INIT_WITH_FA(fa) { &fa._fa }
-#endif
+# define HEAP_INIT 0
+# define HEAP_INIT_WITH_FA(fa) &fa._fa
/* A free-list area `header'. These are actually stored at the _ends_ of
free areas (to make allocating from the beginning of the area simpler),
@@ -129,27 +109,23 @@ extern int __heap_debug;
#endif
/* Output a text representation of HEAP to stderr, labelling it with STR. */
-extern void __heap_dump (struct heap *heap, const char *str);
+extern void __heap_dump (struct heap_free_area *heap, const char *str);
/* Do some consistency checks on HEAP. If they fail, output an error
message to stderr, and exit. STR is printed with the failure message. */
-extern void __heap_check (struct heap *heap, const char *str);
-
-
-#define __heap_lock(heap) __pthread_mutex_lock (&(heap)->lock)
-#define __heap_unlock(heap) __pthread_mutex_unlock (&(heap)->lock)
+extern void __heap_check (struct heap_free_area *heap, const char *str);
/* Delete the free-area FA from HEAP. */
static __inline__ void
-__heap_delete (struct heap *heap, struct heap_free_area *fa)
+__heap_delete (struct heap_free_area *heap, struct heap_free_area *fa)
{
if (fa->next)
fa->next->prev = fa->prev;
if (fa->prev)
fa->prev->next = fa->next;
else
- heap->free_areas = fa->next;
+ heap = fa->next;
}
@@ -157,7 +133,7 @@ __heap_delete (struct heap *heap, struct heap_free_area *fa)
HEAP. PREV and NEXT may be 0; if PREV is 0, FA is installed as the
first free-area. */
static __inline__ void
-__heap_link_free_area (struct heap *heap, struct heap_free_area *fa,
+__heap_link_free_area (struct heap_free_area *heap, struct heap_free_area *fa,
struct heap_free_area *prev,
struct heap_free_area *next)
{
@@ -167,7 +143,7 @@ __heap_link_free_area (struct heap *heap, struct heap_free_area *fa,
if (prev)
prev->next = fa;
else
- heap->free_areas = fa;
+ heap = fa;
if (next)
next->prev = fa;
}
@@ -176,14 +152,14 @@ __heap_link_free_area (struct heap *heap, struct heap_free_area *fa,
PREV may be 0, in which case FA is installed as the first free-area (but
FA may not be 0). */
static __inline__ void
-__heap_link_free_area_after (struct heap *heap,
+__heap_link_free_area_after (struct heap_free_area *heap,
struct heap_free_area *fa,
struct heap_free_area *prev)
{
if (prev)
prev->next = fa;
else
- heap->free_areas = fa;
+ heap = fa;
fa->prev = prev;
}
@@ -192,7 +168,7 @@ __heap_link_free_area_after (struct heap *heap,
PREV and NEXT may be 0; if PREV is 0, MEM is installed as the first
free-area. */
static __inline__ struct heap_free_area *
-__heap_add_free_area (struct heap *heap, void *mem, size_t size,
+__heap_add_free_area (struct heap_free_area *heap, void *mem, size_t size,
struct heap_free_area *prev,
struct heap_free_area *next)
{
@@ -210,7 +186,7 @@ __heap_add_free_area (struct heap *heap, void *mem, size_t size,
/* Allocate SIZE bytes from the front of the free-area FA in HEAP, and
return the amount actually allocated (which may be more than SIZE). */
static __inline__ size_t
-__heap_free_area_alloc (struct heap *heap,
+__heap_free_area_alloc (struct heap_free_area *heap,
struct heap_free_area *fa, size_t size)
{
size_t fa_size = fa->size;
@@ -234,16 +210,16 @@ __heap_free_area_alloc (struct heap *heap,
/* Allocate and return a block at least *SIZE bytes long from HEAP.
*SIZE is adjusted to reflect the actual amount allocated (which may be
greater than requested). */
-extern void *__heap_alloc (struct heap *heap, size_t *size);
+extern void *__heap_alloc (struct heap_free_area *heap, size_t *size);
/* Allocate SIZE bytes at address MEM in HEAP. Return the actual size
allocated, or 0 if we failed. */
-extern size_t __heap_alloc_at (struct heap *heap, void *mem, size_t size);
+extern size_t __heap_alloc_at (struct heap_free_area *heap, void *mem, size_t size);
/* Return the memory area MEM of size SIZE to HEAP.
Returns the heap free area into which the memory was placed. */
-extern struct heap_free_area *__heap_free (struct heap *heap,
+extern struct heap_free_area *__heap_free (struct heap_free_area *heap,
void *mem, size_t size);
/* Return true if HEAP contains absolutely no memory. */
-#define __heap_is_empty(heap) (! (heap)->free_areas)
+#define __heap_is_empty(heap) (! (heap))
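
With struct heap removed, a heap is represented by nothing more than the head of its heap_free_area list: an empty heap is a NULL pointer, and the first-fit scan starts at the head itself instead of at heap->free_areas. A small self-contained sketch of that representation (simplified declarations, not the uClibc ones):

#include <stddef.h>

struct heap_free_area { size_t size; struct heap_free_area *next, *prev; };

#define heap_is_empty(heap)  (!(heap))          /* mirrors the new __heap_is_empty */

/* First-fit walk over the free-area list, the shape __heap_alloc now has. */
static struct heap_free_area *
first_fit(struct heap_free_area *heap, size_t size)
{
    struct heap_free_area *fa;

    for (fa = heap; fa; fa = fa->next)          /* was: heap->free_areas */
        if (fa->size >= size)
            return fa;
    return NULL;
}
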
diff --git a/libc/stdlib/malloc/heap_alloc.c b/libc/stdlib/malloc/heap_alloc.c
index 9f5fd6c1a..cd52038d3 100644
--- a/libc/stdlib/malloc/heap_alloc.c
+++ b/libc/stdlib/malloc/heap_alloc.c
@@ -20,7 +20,7 @@
*SIZE is adjusted to reflect the actual amount allocated (which may be
greater than requested). */
void *
-__heap_alloc (struct heap *heap, size_t *size)
+__heap_alloc (struct heap_free_area *heap, size_t *size)
{
struct heap_free_area *fa;
size_t _size = *size;
@@ -36,7 +36,7 @@ __heap_alloc (struct heap *heap, size_t *size)
HEAP_DEBUG (heap, "before __heap_alloc");
/* Look for a free area that can contain _SIZE bytes. */
- for (fa = heap->free_areas; fa; fa = fa->next)
+ for (fa = heap; fa; fa = fa->next)
if (fa->size >= _size)
{
/* Found one! */
diff --git a/libc/stdlib/malloc/heap_alloc_at.c b/libc/stdlib/malloc/heap_alloc_at.c
index a65140fea..4c071b9ef 100644
--- a/libc/stdlib/malloc/heap_alloc_at.c
+++ b/libc/stdlib/malloc/heap_alloc_at.c
@@ -19,7 +19,7 @@
/* Allocate SIZE bytes at address MEM in HEAP. Return the actual size
allocated, or 0 if we failed. */
size_t
-__heap_alloc_at (struct heap *heap, void *mem, size_t size)
+__heap_alloc_at (struct heap_free_area *heap, void *mem, size_t size)
{
struct heap_free_area *fa;
size_t alloced = 0;
@@ -29,7 +29,7 @@ __heap_alloc_at (struct heap *heap, void *mem, size_t size)
HEAP_DEBUG (heap, "before __heap_alloc_at");
/* Look for a free area that can contain SIZE bytes. */
- for (fa = heap->free_areas; fa; fa = fa->next)
+ for (fa = heap; fa; fa = fa->next)
{
void *fa_mem = HEAP_FREE_AREA_START (fa);
if (fa_mem <= mem)
diff --git a/libc/stdlib/malloc/heap_free.c b/libc/stdlib/malloc/heap_free.c
index 1c4634c55..3326bc691 100644
--- a/libc/stdlib/malloc/heap_free.c
+++ b/libc/stdlib/malloc/heap_free.c
@@ -18,7 +18,7 @@
/* Return the block of memory at MEM, of size SIZE, to HEAP. */
struct heap_free_area *
-__heap_free (struct heap *heap, void *mem, size_t size)
+__heap_free (struct heap_free_area *heap, void *mem, size_t size)
{
struct heap_free_area *fa, *prev_fa;
void *end = (char *)mem + size;
@@ -32,7 +32,7 @@ __heap_free (struct heap *heap, void *mem, size_t size)
in the free-list when it becomes fragmented and long. [A better
implemention would use a balanced tree or something for the free-list,
though that bloats the code-size and complexity quite a bit.] */
- for (prev_fa = 0, fa = heap->free_areas; fa; prev_fa = fa, fa = fa->next)
+ for (prev_fa = 0, fa = heap; fa; prev_fa = fa, fa = fa->next)
if (unlikely (HEAP_FREE_AREA_END (fa) >= mem))
break;
diff --git a/libc/stdlib/malloc/malloc.c b/libc/stdlib/malloc/malloc.c
index ce74c5608..0caf012a2 100644
--- a/libc/stdlib/malloc/malloc.c
+++ b/libc/stdlib/malloc/malloc.c
@@ -26,7 +26,8 @@ libc_hidden_proto(sbrk)
/* The malloc heap. We provide a bit of initial static space so that
programs can do a little mallocing without mmaping in more space. */
HEAP_DECLARE_STATIC_FREE_AREA (initial_fa, 256);
-struct heap __malloc_heap = HEAP_INIT_WITH_FA (initial_fa);
+struct heap_free_area *__malloc_heap = HEAP_INIT_WITH_FA (initial_fa);
+malloc_mutex_t __malloc_heap_lock = PTHREAD_MUTEX_INITIALIZER;
#if defined(MALLOC_USE_LOCKING) && defined(MALLOC_USE_SBRK)
/* A lock protecting our use of sbrk. */
@@ -43,12 +44,13 @@ struct malloc_mmb *__malloc_mmapped_blocks = 0;
them from the main heap, but that tends to cause heap fragmentation in
annoying ways. */
HEAP_DECLARE_STATIC_FREE_AREA (initial_mmb_fa, 48); /* enough for 3 mmbs */
-struct heap __malloc_mmb_heap = HEAP_INIT_WITH_FA (initial_mmb_fa);
+struct heap_free_area *__malloc_mmb_heap = HEAP_INIT_WITH_FA (initial_mmb_fa);
+malloc_mutex_t __malloc_mmb_heap_lock = PTHREAD_MUTEX_INITIALIZER;
#endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
static void *
-malloc_from_heap (size_t size, struct heap *heap)
+malloc_from_heap (size_t size, struct heap_free_area *heap, malloc_mutex_t *heap_lock)
{
void *mem;
@@ -57,12 +59,12 @@ malloc_from_heap (size_t size, struct heap *heap)
/* Include extra space to record the size of the allocated block. */
size += MALLOC_HEADER_SIZE;
- __heap_lock (heap);
+ __pthread_mutex_lock (heap_lock);
/* First try to get memory that's already in our heap. */
mem = __heap_alloc (heap, &size);
- __heap_unlock (heap);
+ __pthread_mutex_unlock (heap_lock);
if (unlikely (! mem))
/* We couldn't allocate from the heap, so grab some more
@@ -126,7 +128,7 @@ malloc_from_heap (size_t size, struct heap *heap)
(long)block, (long)block + block_size, block_size);
/* Get back the heap lock. */
- __heap_lock (heap);
+ __pthread_mutex_lock (heap_lock);
/* Put BLOCK into the heap. */
__heap_free (heap, block, block_size);
@@ -136,7 +138,7 @@ malloc_from_heap (size_t size, struct heap *heap)
/* Try again to allocate. */
mem = __heap_alloc (heap, &size);
- __heap_unlock (heap);
+ __pthread_mutex_unlock (heap_lock);
#if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
/* Insert a record of BLOCK in sorted order into the
@@ -148,7 +150,7 @@ malloc_from_heap (size_t size, struct heap *heap)
if (block < mmb->mem)
break;
- new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap);
+ new_mmb = malloc_from_heap (sizeof *new_mmb, __malloc_mmb_heap, &__malloc_mmb_heap_lock);
new_mmb->next = mmb;
new_mmb->mem = block;
new_mmb->size = block_size;
@@ -207,7 +209,7 @@ malloc (size_t size)
if (unlikely(((unsigned long)size > (unsigned long)(MALLOC_HEADER_SIZE*-2))))
goto oom;
- mem = malloc_from_heap (size, &__malloc_heap);
+ mem = malloc_from_heap (size, __malloc_heap, &__malloc_heap_lock);
if (unlikely (!mem))
{
oom:
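
malloc_from_heap() keeps its old shape, but with the external lock: take the lock for the fast-path allocation, drop it across the mmap()/sbrk() call, then re-take it to donate the new block to the heap and retry. The fragment below is a deliberately oversimplified, self-contained illustration of that lock-drop-around-syscall pattern; it uses a single contiguous pool instead of a real free list, and none of the names are uClibc's.

#include <pthread.h>
#include <stddef.h>
#include <sys/mman.h>

static char  *pool;                 /* one free block, for illustration only */
static size_t pool_left;
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

static void *alloc_from_pool(size_t size)
{
    void *mem = NULL;

    pthread_mutex_lock(&pool_lock);
    if (pool_left >= size) {        /* fast path: carve from what we already have */
        mem = pool;
        pool += size;
        pool_left -= size;
    }
    pthread_mutex_unlock(&pool_lock);

    if (!mem) {
        /* Slow path: the lock is *not* held while asking the kernel for
         * more memory, matching the new malloc_from_heap() flow. */
        size_t blk = (size + 4095) & ~(size_t)4095;
        void *m = mmap(NULL, blk, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (m == MAP_FAILED)
            return NULL;

        pthread_mutex_lock(&pool_lock);
        pool = (char *)m + size;    /* take our piece, donate the remainder */
        pool_left = blk - size;     /* (a real heap would merge, not replace) */
        pthread_mutex_unlock(&pool_lock);
        mem = m;
    }
    return mem;
}
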
diff --git a/libc/stdlib/malloc/malloc.h b/libc/stdlib/malloc/malloc.h
index 7277cd2cf..f49ed34e3 100644
--- a/libc/stdlib/malloc/malloc.h
+++ b/libc/stdlib/malloc/malloc.h
@@ -221,4 +221,8 @@ extern void __malloc_debug_printf (int indent, const char *fmt, ...);
/* The malloc heap. */
-extern struct heap __malloc_heap;
+extern struct heap_free_area *__malloc_heap;
+extern malloc_mutex_t __malloc_heap_lock;
+#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
+extern malloc_mutex_t __malloc_mmb_heap_lock;
+#endif
diff --git a/libc/stdlib/malloc/memalign.c b/libc/stdlib/malloc/memalign.c
index 5b248f3e2..114299b43 100644
--- a/libc/stdlib/malloc/memalign.c
+++ b/libc/stdlib/malloc/memalign.c
@@ -36,7 +36,7 @@ memalign (size_t alignment, size_t size)
{
void *mem, *base;
unsigned long tot_addr, tot_end_addr, addr, end_addr;
- struct heap *heap = &__malloc_heap;
+ struct heap_free_area *heap = __malloc_heap;
/* Make SIZE something we like. */
size = HEAP_ADJUST_SIZE (size);
diff --git a/libc/stdlib/malloc/realloc.c b/libc/stdlib/malloc/realloc.c
index 948326762..b3b5bae14 100644
--- a/libc/stdlib/malloc/realloc.c
+++ b/libc/stdlib/malloc/realloc.c
@@ -59,9 +59,9 @@ realloc (void *mem, size_t new_size)
{
size_t extra = new_size - size;
- __heap_lock (&__malloc_heap);
- extra = __heap_alloc_at (&__malloc_heap, base_mem + size, extra);
- __heap_unlock (&__malloc_heap);
+ __pthread_mutex_lock (&__malloc_heap_lock);
+ extra = __heap_alloc_at (__malloc_heap, base_mem + size, extra);
+ __pthread_mutex_unlock (&__malloc_heap_lock);
if (extra)
/* Record the changed size. */
@@ -82,9 +82,9 @@ realloc (void *mem, size_t new_size)
else if (new_size + MALLOC_REALLOC_MIN_FREE_SIZE <= size)
/* Shrink the block. */
{
- __heap_lock (&__malloc_heap);
- __heap_free (&__malloc_heap, base_mem + new_size, size - new_size);
- __heap_unlock (&__malloc_heap);
+ __pthread_mutex_lock (&__malloc_heap_lock);
+ __heap_free (__malloc_heap, base_mem + new_size, size - new_size);
+ __pthread_mutex_unlock (&__malloc_heap_lock);
MALLOC_SET_SIZE (base_mem, new_size);
}
diff --git a/libpthread/linuxthreads.old/ptfork.c b/libpthread/linuxthreads.old/ptfork.c
index c34ea8104..7a5749efc 100644
--- a/libpthread/linuxthreads.old/ptfork.c
+++ b/libpthread/linuxthreads.old/ptfork.c
@@ -20,6 +20,7 @@
#ifdef __ARCH_USE_MMU__
+#include <bits/uClibc_mutex.h>
#include <stddef.h>
#include <stdlib.h>
#include <unistd.h>
@@ -36,6 +37,16 @@ static struct handler_list * pthread_atfork_prepare = NULL;
static struct handler_list * pthread_atfork_parent = NULL;
static struct handler_list * pthread_atfork_child = NULL;
+#ifdef __MALLOC__
+__UCLIBC_MUTEX_EXTERN(__malloc_heap_lock);
+__UCLIBC_MUTEX_EXTERN(__malloc_sbrk_lock);
+#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
+__UCLIBC_MUTEX_EXTERN(__malloc_mmb_heap_lock);
+#endif
+#elif defined(__MALLOC_STANDARD__) || defined(__MALLOC_SIMPLE__)
+__UCLIBC_MUTEX_EXTERN(__malloc_lock);
+#endif
+
static void pthread_insert_list(struct handler_list ** list,
void (*handler)(void),
struct handler_list * newlist,
@@ -78,6 +89,10 @@ static __inline__ void pthread_call_handlers(struct handler_list * list)
for (/*nothing*/; list != NULL; list = list->next) (list->handler)();
}
+void __pthread_once_fork_prepare(void);
+void __pthread_once_fork_child(void);
+void __pthread_once_fork_parent(void);
+
extern __typeof(fork) __libc_fork;
pid_t __fork(void) attribute_hidden;
@@ -90,14 +105,47 @@ pid_t __fork(void)
prepare = pthread_atfork_prepare;
child = pthread_atfork_child;
parent = pthread_atfork_parent;
- __pthread_mutex_unlock(&pthread_atfork_lock);
pthread_call_handlers(prepare);
+
+ __pthread_once_fork_prepare();
+#ifdef __MALLOC__
+ __pthread_mutex_lock(&__malloc_sbrk_lock);
+ __pthread_mutex_lock(&__malloc_heap_lock);
+#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
+ __pthread_mutex_lock(&__malloc_mmb_heap_lock);
+#endif
+#elif defined(__MALLOC_STANDARD__) || defined(__MALLOC_SIMPLE__)
+ __pthread_mutex_lock(&__malloc_lock);
+#endif
+
pid = __libc_fork();
if (pid == 0) {
+#if defined(__MALLOC_STANDARD__) || defined(__MALLOC_SIMPLE__)
+ __libc_lock_init_recursive(__malloc_lock);
+#elif defined(__MALLOC__)
+#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
+ __libc_lock_init_adaptive(__malloc_mmb_heap_lock);
+#endif
+ __libc_lock_init_adaptive(__malloc_heap_lock);
+ __libc_lock_init(__malloc_sbrk_lock);
+#endif
+ __libc_lock_init_adaptive(pthread_atfork_lock);
__pthread_reset_main_thread();
__fresetlockfiles();
+ __pthread_once_fork_child();
pthread_call_handlers(child);
} else {
+#if defined(__MALLOC_STANDARD__) || defined(__MALLOC_SIMPLE__)
+ __pthread_mutex_unlock(&__malloc_lock);
+#elif defined(__MALLOC__)
+#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
+ __pthread_mutex_unlock(&__malloc_mmb_heap_lock);
+#endif
+ __pthread_mutex_unlock(&__malloc_heap_lock);
+ __pthread_mutex_unlock(&__malloc_sbrk_lock);
+#endif
+ __pthread_mutex_unlock(&pthread_atfork_lock);
+ __pthread_once_fork_parent();
pthread_call_handlers(parent);
}
return pid;
diff --git a/libpthread/linuxthreads.old/sysdeps/pthread/bits/libc-lock.h b/libpthread/linuxthreads.old/sysdeps/pthread/bits/libc-lock.h
index 740e793be..78593ac11 100644
--- a/libpthread/linuxthreads.old/sysdeps/pthread/bits/libc-lock.h
+++ b/libpthread/linuxthreads.old/sysdeps/pthread/bits/libc-lock.h
@@ -30,7 +30,7 @@
/* Mutex type. */
#if defined(_LIBC) || defined(_IO_MTSAFE_IO)
typedef pthread_mutex_t __libc_lock_t;
-typedef struct { pthread_mutex_t mutex; } __libc_lock_recursive_t;
+typedef pthread_mutex_t __libc_lock_recursive_t;
# ifdef __USE_UNIX98
typedef pthread_rwlock_t __libc_rwlock_t;
# else
@@ -132,15 +132,39 @@ typedef pthread_key_t __libc_key_t;
#define __libc_rwlock_init(NAME) \
(__libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0));
+/* Same as last but this time we initialize an adaptive mutex. */
+#if defined _LIBC && !defined NOT_IN_libc && defined SHARED
+#define __libc_lock_init_adaptive(NAME) \
+ ({ \
+ (NAME).__m_count = 0; \
+ (NAME).__m_owner = NULL; \
+ (NAME).__m_kind = PTHREAD_MUTEX_ADAPTIVE_NP; \
+ (NAME).__m_lock.__status = 0; \
+ (NAME).__m_lock.__spinlock = __LT_SPINLOCK_INIT; \
+ 0; })
+#else
+#define __libc_lock_init_adaptive(NAME) \
+ do { \
+ if (__pthread_mutex_init != NULL) \
+ { \
+ pthread_mutexattr_t __attr; \
+ __pthread_mutexattr_init (&__attr); \
+ __pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_ADAPTIVE_NP); \
+ __pthread_mutex_init (&(NAME), &__attr); \
+ __pthread_mutexattr_destroy (&__attr); \
+ } \
+ } while (0);
+#endif
+
/* Same as last but this time we initialize a recursive mutex. */
#if defined _LIBC && !defined NOT_IN_libc && defined SHARED
#define __libc_lock_init_recursive(NAME) \
({ \
- (NAME).mutex.__m_count = 0; \
- (NAME).mutex.__m_owner = NULL; \
- (NAME).mutex.__m_kind = PTHREAD_MUTEX_RECURSIVE_NP; \
- (NAME).mutex.__m_lock.__status = 0; \
- (NAME).mutex.__m_lock.__spinlock = __LT_SPINLOCK_INIT; \
+ (NAME).__m_count = 0; \
+ (NAME).__m_owner = NULL; \
+ (NAME).__m_kind = PTHREAD_MUTEX_RECURSIVE_NP; \
+ (NAME).__m_lock.__status = 0; \
+ (NAME).__m_lock.__spinlock = __LT_SPINLOCK_INIT; \
0; })
#else
#define __libc_lock_init_recursive(NAME) \
@@ -150,7 +174,7 @@ typedef pthread_key_t __libc_key_t;
pthread_mutexattr_t __attr; \
__pthread_mutexattr_init (&__attr); \
__pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP); \
- __pthread_mutex_init (&(NAME).mutex, &__attr); \
+ __pthread_mutex_init (&(NAME), &__attr); \
__pthread_mutexattr_destroy (&__attr); \
} \
} while (0);
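
The new __libc_lock_init_adaptive macro mirrors the recursive variant: inside the shared libc build it pokes the linuxthreads mutex fields directly, and in the portable branch it goes through the mutex-attribute API. For reference, that portable branch boils down to something like the compilable sketch below (PTHREAD_MUTEX_ADAPTIVE_NP is a GNU extension, hence _GNU_SOURCE; the wrapper name is ours):

#define _GNU_SOURCE
#include <pthread.h>

/* Initialize *m as an adaptive mutex via the attribute API. */
static int init_adaptive(pthread_mutex_t *m)
{
    pthread_mutexattr_t attr;
    int rc = pthread_mutexattr_init(&attr);

    if (rc != 0)
        return rc;
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
    rc = pthread_mutex_init(m, &attr);
    pthread_mutexattr_destroy(&attr);
    return rc;
}
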