diff options
| author | Austin Foxley <austinf@cetoncorp.com> | 2009-12-19 14:02:14 -0800 | 
|---|---|---|
| committer | Austin Foxley <austinf@cetoncorp.com> | 2009-12-19 14:03:00 -0800 | 
| commit | 875d11eec5df38bae0003df4a884ef962cf28590 (patch) | |
| tree | 83667b7ab2470a3aba40fb9ee868784d77d7e900 /libc/stdlib/malloc | |
| parent | f3217f9be3225c4943677d03b274cbc0cb4ed228 (diff) | |
| parent | 23528282b771d1af3df0fa17f1e909ad3b663f59 (diff) | |
Merge commit 'origin/master' into nptl
Conflicts:
	libc/signal/sigpause.c
	libc/string/x86_64/memset.S
Signed-off-by: Austin Foxley <austinf@cetoncorp.com>
Diffstat (limited to 'libc/stdlib/malloc')
| -rw-r--r-- | libc/stdlib/malloc/free.c | 6 | ||||
| -rw-r--r-- | libc/stdlib/malloc/malloc.c | 7 | 
2 files changed, 7 insertions, 6 deletions
diff --git a/libc/stdlib/malloc/free.c b/libc/stdlib/malloc/free.c
index c17e7ec2a..e7b6a290a 100644
--- a/libc/stdlib/malloc/free.c
+++ b/libc/stdlib/malloc/free.c
@@ -177,14 +177,14 @@ __free_to_heap (void *mem, struct heap_free_area **heap
 	      /* Start searching again from the end of this block.  */
 	      start = mmb_end;
+	      /* Release the descriptor block we used.  */
+	      free_to_heap (mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
+
 	      /* We have to unlock the heap before we recurse to free the mmb
 		 descriptor, because we might be unmapping from the mmb
 		 heap.  */
 	      __heap_unlock (heap_lock);
-	      /* Release the descriptor block we used.  */
-	      free_to_heap (mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
-
 	      /* Do the actual munmap.  */
 	      munmap ((void *)mmb_start, mmb_end - mmb_start);
diff --git a/libc/stdlib/malloc/malloc.c b/libc/stdlib/malloc/malloc.c
index 337206f09..d58a7d0ee 100644
--- a/libc/stdlib/malloc/malloc.c
+++ b/libc/stdlib/malloc/malloc.c
@@ -46,7 +46,7 @@ struct malloc_mmb *__malloc_mmapped_blocks = 0;
 HEAP_DECLARE_STATIC_FREE_AREA (initial_mmb_fa, 48); /* enough for 3 mmbs */
 struct heap_free_area *__malloc_mmb_heap = HEAP_INIT_WITH_FA (initial_mmb_fa);
 #ifdef HEAP_USE_LOCKING
-malloc_mutex_t __malloc_mmb_heap_lock = PTHREAD_MUTEX_INITIALIZER;
+malloc_mutex_t __malloc_mmb_heap_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
 #endif
 #endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
@@ -149,19 +149,19 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap
 	  /* Try again to allocate.  */
 	  mem = __heap_alloc (heap, &size);
-	  __heap_unlock (heap_lock);
 #if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
 	  /* Insert a record of BLOCK in sorted order into the
 	     __malloc_mmapped_blocks list.  */
+	  new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
+
 	  for (prev_mmb = 0, mmb = __malloc_mmapped_blocks;
 	       mmb;
 	       prev_mmb = mmb, mmb = mmb->next)
 	    if (block < mmb->mem)
 	      break;
-	  new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
 	  new_mmb->next = mmb;
 	  new_mmb->mem = block;
 	  new_mmb->size = block_size;
@@ -175,6 +175,7 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap
 			    (unsigned)new_mmb,
 			    (unsigned)new_mmb->mem, block_size);
 #endif /* !MALLOC_USE_SBRK && __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
+	  __heap_unlock (heap_lock);
 	}
     }
