summaryrefslogtreecommitdiff
path: root/libpthread/nptl/sysdeps/unix/sysv/linux/i386
diff options
context:
space:
mode:
Diffstat (limited to 'libpthread/nptl/sysdeps/unix/sysv/linux/i386')
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/Makefile.arch3
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h9
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/bits/semaphore.h3
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/fork.c2
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S12
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S349
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S233
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S71
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S152
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S155
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S384
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S383
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S74
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S96
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S92
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S59
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S69
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_post.S76
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S266
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S12
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S243
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/i586/lowlevelrobustlock.S20
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/i686/lowlevelrobustlock.S20
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h703
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/not-cancel.h51
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/pt-vfork.S34
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/pthread_once.S32
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/smp.h38
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/sysdep-cancel.h18
-rw-r--r--libpthread/nptl/sysdeps/unix/sysv/linux/i386/vfork.S2
30 files changed, 2762 insertions, 899 deletions
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/Makefile.arch b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/Makefile.arch
index 740ee7fcb..9bb19381c 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/Makefile.arch
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/Makefile.arch
@@ -15,7 +15,7 @@ libc_a_CSRC = fork.c
libc_a_SSRC = clone.S vfork.S
libpthread_SSRC += i486/lowlevellock.S i486/pthread_barrier_wait.S i486/pthread_cond_signal.S i486/pthread_cond_broadcast.S \
- i486/sem_post.S i486/sem_timedwait.S \
+ i486/lowlevelrobustlock.S i486/sem_post.S i486/sem_timedwait.S \
i486/sem_trywait.S i486/sem_wait.S i486/pthread_rwlock_rdlock.S i486/pthread_rwlock_wrlock.S \
i486/pthread_rwlock_timedrdlock.S i486/pthread_rwlock_timedwrlock.S i486/pthread_rwlock_unlock.S
#i486/pthread_cond_timedwait.S i486/pthread_cond_wait.S
@@ -31,6 +31,7 @@ endif
ASFLAGS-pt-vfork.S = -DNOT_IN_libc=1 -DIS_IN_libpthread=1 -D_LIBC_REENTRANT -DUSE___THREAD
ASFLAGS-lowlevellock.S = -DNOT_IN_libc=1 -DIS_IN_libpthread=1 -D_LIBC_REENTRANT -DUSE___THREAD
+ASFLAGS-lowlevelrobustlock.S = -DNOT_IN_libc=1 -DIS_IN_libpthread=1 -D_LIBC_REENTRANT -DUSE___THREAD
ASFLAGS-pthread_once.S = -DNOT_IN_libc=1 -DIS_IN_libpthread=1 -D_LIBC_REENTRANT -DUSE___THREAD
ASFLAGS-pthread_spin_unlock.S = -DNOT_IN_libc=1 -DIS_IN_libpthread=1 -D_LIBC_REENTRANT -DUSE___THREAD
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h
index 0ec6e5534..9e3e016fb 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002,2003,2004,2005,2006,2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -128,7 +128,10 @@ typedef union
unsigned int __nr_writers_queued;
/* FLAGS must stay at this position in the structure to maintain
binary compatibility. */
- unsigned int __flags;
+ unsigned char __flags;
+ unsigned char __shared;
+ unsigned char __pad1;
+ unsigned char __pad2;
int __writer;
} __data;
char __size[__SIZEOF_PTHREAD_RWLOCK_T];
@@ -165,6 +168,6 @@ typedef union
/* Extra attributes for the cleanup functions. */
-#define __cleanup_fct_attribute __attribute ((regparm (1)))
+#define __cleanup_fct_attribute __attribute__ ((__regparm__ (1)))
#endif /* bits/pthreadtypes.h */
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/bits/semaphore.h b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/bits/semaphore.h
index e6c5d845c..934493c30 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/bits/semaphore.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/bits/semaphore.h
@@ -28,9 +28,6 @@
/* Value returned if `sem_open' failed. */
#define SEM_FAILED ((sem_t *) 0)
-/* Maximum value the semaphore can have. */
-#define SEM_VALUE_MAX (2147483647)
-
typedef union
{
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/fork.c b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/fork.c
index b874538b3..813e5299a 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/fork.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/fork.c
@@ -19,10 +19,10 @@
#include <sched.h>
#include <signal.h>
-#include <stdio.h>
#include <sysdep.h>
#include <tls.h>
+
#define ARCH_FORK() \
INLINE_SYSCALL (clone, 5, \
CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID | SIGCHLD, 0, \
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S
index 223b11108..ce8ad27aa 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -17,14 +17,4 @@
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
-/* In libc.so we do not unconditionally use the lock prefix. Only if
- the application is using threads. */
-#ifndef UP
-# define LOCK \
- cmpl $0, %gs:MULTIPLE_THREADS_OFFSET; \
- je,pt 0f; \
- lock; \
-0:
-#endif
-
#include "lowlevellock.S"
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S
index 955e119ab..61255a0af 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2004, 2006, 2007, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19,34 +19,74 @@
#include <sysdep.h>
#include <pthread-errnos.h>
+#include <bits/kernel-features.h>
+#include <lowlevellock.h>
.text
-#ifndef LOCK
-# ifdef UP
-# define LOCK
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+ movl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+ xorl $(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME), reg
+# define LOAD_FUTEX_WAKE(reg) \
+ xorl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl %gs:PRIVATE_FUTEX, reg
# else
-# define LOCK lock
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl %gs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT, reg
# endif
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+ movl %gs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAKE, reg
+# if FUTEX_WAIT == 0
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %gs:PRIVATE_FUTEX, reg
+# else
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %gs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT, reg
+# endif
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %gs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, reg
+# define LOAD_FUTEX_WAKE(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %gs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAKE, reg
#endif
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-
- .globl __lll_mutex_lock_wait
- .type __lll_mutex_lock_wait,@function
- .hidden __lll_mutex_lock_wait
+ .globl __lll_lock_wait_private
+ .type __lll_lock_wait_private,@function
+ .hidden __lll_lock_wait_private
.align 16
-__lll_mutex_lock_wait:
+__lll_lock_wait_private:
+ cfi_startproc
pushl %edx
+ cfi_adjust_cfa_offset(4)
pushl %ebx
+ cfi_adjust_cfa_offset(4)
pushl %esi
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%edx, -8)
+ cfi_offset(%ebx, -12)
+ cfi_offset(%esi, -16)
movl $2, %edx
movl %ecx, %ebx
xorl %esi, %esi /* No timeout. */
- xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
+ LOAD_PRIVATE_FUTEX_WAIT (%ecx)
cmpl %edx, %eax /* NB: %edx == 2 */
jne 2f
@@ -58,41 +98,162 @@ __lll_mutex_lock_wait:
xchgl %eax, (%ebx) /* NB: lock is implied */
testl %eax, %eax
- jnz,pn 1b
+ jnz 1b
popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
popl %edx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edx)
ret
- .size __lll_mutex_lock_wait,.-__lll_mutex_lock_wait
-
+ cfi_endproc
+ .size __lll_lock_wait_private,.-__lll_lock_wait_private
#ifdef NOT_IN_libc
- .globl __lll_mutex_timedlock_wait
- .type __lll_mutex_timedlock_wait,@function
- .hidden __lll_mutex_timedlock_wait
+ .globl __lll_lock_wait
+ .type __lll_lock_wait,@function
+ .hidden __lll_lock_wait
+ .align 16
+__lll_lock_wait:
+ cfi_startproc
+ pushl %edx
+ cfi_adjust_cfa_offset(4)
+ pushl %ebx
+ cfi_adjust_cfa_offset(4)
+ pushl %esi
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%edx, -8)
+ cfi_offset(%ebx, -12)
+ cfi_offset(%esi, -16)
+
+ movl %edx, %ebx
+ movl $2, %edx
+ xorl %esi, %esi /* No timeout. */
+ LOAD_FUTEX_WAIT (%ecx)
+
+ cmpl %edx, %eax /* NB: %edx == 2 */
+ jne 2f
+
+1: movl $SYS_futex, %eax
+ ENTER_KERNEL
+
+2: movl %edx, %eax
+ xchgl %eax, (%ebx) /* NB: lock is implied */
+
+ testl %eax, %eax
+ jnz 1b
+
+ popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
+ popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
+ popl %edx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edx)
+ ret
+ cfi_endproc
+ .size __lll_lock_wait,.-__lll_lock_wait
+
+ /* %ecx: futex
+ %esi: flags
+ %edx: timeout
+ %eax: futex value
+ */
+ .globl __lll_timedlock_wait
+ .type __lll_timedlock_wait,@function
+ .hidden __lll_timedlock_wait
.align 16
-__lll_mutex_timedlock_wait:
+__lll_timedlock_wait:
+ cfi_startproc
+ pushl %ebp
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%ebp, 0)
+ pushl %ebx
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%ebx, 0)
+
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+# ifdef PIC
+ LOAD_PIC_REG (bx)
+ cmpl $0, __have_futex_clock_realtime@GOTOFF(%ebx)
+# else
+ cmpl $0, __have_futex_clock_realtime
+# endif
+ je .Lreltmo
+# endif
+
+ movl %ecx, %ebx
+ movl %esi, %ecx
+ movl %edx, %esi
+ movl $0xffffffff, %ebp
+ LOAD_FUTEX_WAIT_ABS (%ecx)
+
+ movl $2, %edx
+ cmpl %edx, %eax
+ jne 2f
+
+1: movl $SYS_futex, %eax
+ movl $2, %edx
+ ENTER_KERNEL
+
+2: xchgl %edx, (%ebx) /* NB: lock is implied */
+
+ testl %edx, %edx
+ jz 3f
+
+ cmpl $-ETIMEDOUT, %eax
+ je 4f
+ cmpl $-EINVAL, %eax
+ jne 1b
+4: movl %eax, %edx
+ negl %edx
+
+3: movl %edx, %eax
+7: popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
+ popl %ebp
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebp)
+ ret
+
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+.Lreltmo:
/* Check for a valid timeout value. */
cmpl $1000000000, 4(%edx)
jae 3f
- pushl %edi
pushl %esi
- pushl %ebx
- pushl %ebp
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%esi, 0)
+ pushl %edi
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%edi, 0)
/* Stack frame for the timespec and timeval structs. */
subl $8, %esp
+ cfi_adjust_cfa_offset(8)
movl %ecx, %ebp
movl %edx, %edi
+ movl $2, %edx
+ xchgl %edx, (%ebp)
+
+ test %edx, %edx
+ je 6f
+
1:
/* Get current time. */
movl %esp, %ebx
xorl %ecx, %ecx
- movl $SYS_gettimeofday, %eax
+ movl $__NR_gettimeofday, %eax
ENTER_KERNEL
/* Compute relative timeout. */
@@ -107,116 +268,128 @@ __lll_mutex_timedlock_wait:
addl $1000000000, %edx
subl $1, %ecx
4: testl %ecx, %ecx
- js 5f /* Time is already up. */
+ js 2f /* Time is already up. */
/* Store relative timeout. */
movl %ecx, (%esp)
movl %edx, 4(%esp)
+ /* Futex call. */
movl %ebp, %ebx
-
- movl $1, %eax
movl $2, %edx
- LOCK
- cmpxchgl %edx, (%ebx)
-
- testl %eax, %eax
- je 8f
-
- /* Futex call. */
movl %esp, %esi
- xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
+ movl 16(%esp), %ecx
+ LOAD_FUTEX_WAIT (%ecx)
movl $SYS_futex, %eax
ENTER_KERNEL
- movl %eax, %ecx
-8: /* NB: %edx == 2 */
- xorl %eax, %eax
- LOCK
- cmpxchgl %edx, (%ebx)
+ /* NB: %edx == 2 */
+ xchgl %edx, (%ebp)
- jnz 7f
+ testl %edx, %edx
+ je 6f
+
+ cmpl $-ETIMEDOUT, %eax
+ jne 1b
+2: movl $ETIMEDOUT, %edx
6: addl $8, %esp
- popl %ebp
- popl %ebx
- popl %esi
+ cfi_adjust_cfa_offset(-8)
popl %edi
- ret
-
- /* Check whether the time expired. */
-7: cmpl $-ETIMEDOUT, %ecx
- je 5f
-
- /* Make sure the current holder knows we are going to sleep. */
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edi)
+ popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
+7: popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
+ popl %ebp
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebp)
movl %edx, %eax
- xchgl %eax, (%ebx)
- testl %eax, %eax
- jz 6b
- jmp 1b
-
-3: movl $EINVAL, %eax
ret
-5: movl $ETIMEDOUT, %eax
- jmp 6b
- .size __lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait
+3: movl $EINVAL, %edx
+ jmp 7b
+# endif
+ cfi_endproc
+ .size __lll_timedlock_wait,.-__lll_timedlock_wait
#endif
-
-#ifdef NOT_IN_libc
- .globl lll_unlock_wake_cb
- .type lll_unlock_wake_cb,@function
- .hidden lll_unlock_wake_cb
+ .globl __lll_unlock_wake_private
+ .type __lll_unlock_wake_private,@function
+ .hidden __lll_unlock_wake_private
.align 16
-lll_unlock_wake_cb:
+__lll_unlock_wake_private:
+ cfi_startproc
pushl %ebx
+ cfi_adjust_cfa_offset(4)
pushl %ecx
+ cfi_adjust_cfa_offset(4)
pushl %edx
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%ebx, -8)
+ cfi_offset(%ecx, -12)
+ cfi_offset(%edx, -16)
- movl 20(%esp), %ebx
- LOCK
- subl $1, (%ebx)
- je 1f
-
- movl $FUTEX_WAKE, %ecx
+ movl %eax, %ebx
+ movl $0, (%eax)
+ LOAD_PRIVATE_FUTEX_WAKE (%ecx)
movl $1, %edx /* Wake one thread. */
movl $SYS_futex, %eax
- movl $0, (%ebx)
ENTER_KERNEL
-1: popl %edx
+ popl %edx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edx)
popl %ecx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ecx)
popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
ret
- .size lll_unlock_wake_cb,.-lll_unlock_wake_cb
-#endif
-
+ cfi_endproc
+ .size __lll_unlock_wake_private,.-__lll_unlock_wake_private
- .globl __lll_mutex_unlock_wake
- .type __lll_mutex_unlock_wake,@function
- .hidden __lll_mutex_unlock_wake
+#ifdef NOT_IN_libc
+ .globl __lll_unlock_wake
+ .type __lll_unlock_wake,@function
+ .hidden __lll_unlock_wake
.align 16
-__lll_mutex_unlock_wake:
+__lll_unlock_wake:
+ cfi_startproc
pushl %ebx
+ cfi_adjust_cfa_offset(4)
pushl %ecx
+ cfi_adjust_cfa_offset(4)
pushl %edx
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%ebx, -8)
+ cfi_offset(%ecx, -12)
+ cfi_offset(%edx, -16)
movl %eax, %ebx
movl $0, (%eax)
- movl $FUTEX_WAKE, %ecx
+ LOAD_FUTEX_WAKE (%ecx)
movl $1, %edx /* Wake one thread. */
movl $SYS_futex, %eax
ENTER_KERNEL
popl %edx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edx)
popl %ecx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ecx)
popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
ret
- .size __lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake
+ cfi_endproc
+ .size __lll_unlock_wake,.-__lll_unlock_wake
-
-#ifdef NOT_IN_libc
.globl __lll_timedwait_tid
.type __lll_timedwait_tid,@function
.hidden __lll_timedwait_tid
@@ -234,7 +407,7 @@ __lll_timedwait_tid:
/* Get current time. */
2: movl %esp, %ebx
xorl %ecx, %ecx
- movl $SYS_gettimeofday, %eax
+ movl $__NR_gettimeofday, %eax
ENTER_KERNEL
/* Compute relative timeout. */
@@ -259,6 +432,8 @@ __lll_timedwait_tid:
jz 4f
movl %esp, %esi
+ /* XXX The kernel so far uses global futex for the wakeup at
+ all times. */
xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
movl %ebp, %ebx
movl $SYS_futex, %eax
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S
new file mode 100644
index 000000000..596763444
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S
@@ -0,0 +1,233 @@
+/* Copyright (C) 2002, 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <pthread-errnos.h>
+#include <lowlevellock.h>
+#include <lowlevelrobustlock.h>
+#include <bits/kernel-features.h>
+
+ .text
+
+#define FUTEX_WAITERS 0x80000000
+#define FUTEX_OWNER_DIED 0x40000000
+
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %gs:PRIVATE_FUTEX, reg
+# else
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %gs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT, reg
+# endif
+#endif
+
+ .globl __lll_robust_lock_wait
+ .type __lll_robust_lock_wait,@function
+ .hidden __lll_robust_lock_wait
+ .align 16
+__lll_robust_lock_wait:
+ cfi_startproc
+ pushl %edx
+ cfi_adjust_cfa_offset(4)
+ pushl %ebx
+ cfi_adjust_cfa_offset(4)
+ pushl %esi
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%edx, -8)
+ cfi_offset(%ebx, -12)
+ cfi_offset(%esi, -16)
+
+ movl %edx, %ebx
+ xorl %esi, %esi /* No timeout. */
+ LOAD_FUTEX_WAIT (%ecx)
+
+4: movl %eax, %edx
+ orl $FUTEX_WAITERS, %edx
+
+ testl $FUTEX_OWNER_DIED, %eax
+ jnz 3f
+
+ cmpl %edx, %eax /* NB: %edx == 2 */
+ je 1f
+
+ LOCK
+ cmpxchgl %edx, (%ebx)
+ jnz 2f
+
+1: movl $SYS_futex, %eax
+ ENTER_KERNEL
+
+ movl (%ebx), %eax
+
+2: test %eax, %eax
+ jne 4b
+
+ movl %gs:TID, %edx
+ orl $FUTEX_WAITERS, %edx
+ LOCK
+ cmpxchgl %edx, (%ebx)
+ jnz 4b
+ /* NB: %eax == 0 */
+
+3: popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
+ popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
+ popl %edx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edx)
+ ret
+ cfi_endproc
+ .size __lll_robust_lock_wait,.-__lll_robust_lock_wait
+
+
+ .globl __lll_robust_timedlock_wait
+ .type __lll_robust_timedlock_wait,@function
+ .hidden __lll_robust_timedlock_wait
+ .align 16
+__lll_robust_timedlock_wait:
+ cfi_startproc
+ /* Check for a valid timeout value. */
+ cmpl $1000000000, 4(%edx)
+ jae 3f
+
+ pushl %edi
+ cfi_adjust_cfa_offset(4)
+ pushl %esi
+ cfi_adjust_cfa_offset(4)
+ pushl %ebx
+ cfi_adjust_cfa_offset(4)
+ pushl %ebp
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%edi, -8)
+ cfi_offset(%esi, -12)
+ cfi_offset(%ebx, -16)
+ cfi_offset(%ebp, -20)
+
+ /* Stack frame for the timespec and timeval structs. */
+ subl $12, %esp
+ cfi_adjust_cfa_offset(12)
+
+ movl %ecx, %ebp
+ movl %edx, %edi
+
+1: movl %eax, 8(%esp)
+
+ /* Get current time. */
+ movl %esp, %ebx
+ xorl %ecx, %ecx
+ movl $__NR_gettimeofday, %eax
+ ENTER_KERNEL
+
+ /* Compute relative timeout. */
+ movl 4(%esp), %eax
+ movl $1000, %edx
+ mul %edx /* Milli seconds to nano seconds. */
+ movl (%edi), %ecx
+ movl 4(%edi), %edx
+ subl (%esp), %ecx
+ subl %eax, %edx
+ jns 4f
+ addl $1000000000, %edx
+ subl $1, %ecx
+4: testl %ecx, %ecx
+ js 8f /* Time is already up. */
+
+ /* Store relative timeout. */
+ movl %ecx, (%esp)
+ movl %edx, 4(%esp)
+
+ movl %ebp, %ebx
+
+ movl 8(%esp), %edx
+ movl %edx, %eax
+ orl $FUTEX_WAITERS, %edx
+
+ testl $FUTEX_OWNER_DIED, %eax
+ jnz 6f
+
+ cmpl %eax, %edx
+ je 2f
+
+ LOCK
+ cmpxchgl %edx, (%ebx)
+ movl $0, %ecx /* Must use mov to avoid changing cc. */
+ jnz 5f
+
+2:
+ /* Futex call. */
+ movl %esp, %esi
+ movl 20(%esp), %ecx
+ LOAD_FUTEX_WAIT (%ecx)
+ movl $SYS_futex, %eax
+ ENTER_KERNEL
+ movl %eax, %ecx
+
+ movl (%ebx), %eax
+
+5: testl %eax, %eax
+ jne 7f
+
+ movl %gs:TID, %edx
+ orl $FUTEX_WAITERS, %edx
+ LOCK
+ cmpxchgl %edx, (%ebx)
+ jnz 7f
+
+6: addl $12, %esp
+ cfi_adjust_cfa_offset(-12)
+ popl %ebp
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebp)
+ popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
+ popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
+ popl %edi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edi)
+ ret
+
+3: movl $EINVAL, %eax
+ ret
+
+ cfi_adjust_cfa_offset(28)
+ cfi_offset(%edi, -8)
+ cfi_offset(%esi, -12)
+ cfi_offset(%ebx, -16)
+ cfi_offset(%ebp, -20)
+ /* Check whether the time expired. */
+7: cmpl $-ETIMEDOUT, %ecx
+ jne 1b
+
+8: movl $ETIMEDOUT, %eax
+ jmp 6b
+ cfi_endproc
+ .size __lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S
index 2af9e38cd..040d7f8c3 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,25 +18,19 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelbarrier.h>
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
.text
.globl pthread_barrier_wait
.type pthread_barrier_wait,@function
.align 16
pthread_barrier_wait:
+ cfi_startproc
pushl %ebx
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%ebx, -8)
movl 8(%esp), %ebx
@@ -54,6 +48,8 @@ pthread_barrier_wait:
/* There are more threads to come. */
pushl %esi
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%esi, -12)
#if CURR_EVENT == 0
movl (%ebx), %edx
@@ -68,7 +64,13 @@ pthread_barrier_wait:
/* Wait for the remaining threads. The call will return immediately
if the CURR_EVENT memory has meanwhile been changed. */
-7: xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
+7:
+#if FUTEX_WAIT == 0
+ movl PRIVATE(%ebx), %ecx
+#else
+ movl $FUTEX_WAIT, %ecx
+ orl PRIVATE(%ebx), %ecx
+#endif
xorl %esi, %esi
8: movl $SYS_futex, %eax
ENTER_KERNEL
@@ -81,7 +83,7 @@ pthread_barrier_wait:
#else
cmpl %edx, CURR_EVENT(%ebx)
#endif
- je,pn 8b
+ je 8b
/* Increment LEFT. If this brings the count back to the
initial count unlock the object. */
@@ -91,7 +93,7 @@ pthread_barrier_wait:
xaddl %edx, LEFT(%ebx)
subl $1, %ecx
cmpl %ecx, %edx
- jne,pt 10f
+ jne 10f
/* Release the mutex. We cannot release the lock before
waking the waiting threads since otherwise a new thread might
@@ -104,9 +106,16 @@ pthread_barrier_wait:
10: movl %esi, %eax /* != PTHREAD_BARRIER_SERIAL_THREAD */
popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
ret
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%ebx, -8)
+
/* The necessary number of threads arrived. */
3:
#if CURR_EVENT == 0
@@ -119,6 +128,7 @@ pthread_barrier_wait:
so 0x7fffffff is the highest value. */
movl $0x7fffffff, %edx
movl $FUTEX_WAKE, %ecx
+ orl PRIVATE(%ebx), %ecx
movl $SYS_futex, %eax
ENTER_KERNEL
@@ -130,7 +140,7 @@ pthread_barrier_wait:
xaddl %edx, LEFT(%ebx)
subl $1, %ecx
cmpl %ecx, %edx
- jne,pt 5f
+ jne 5f
/* Release the mutex. We cannot release the lock before
waking the waiting threads since otherwise a new thread might
@@ -142,21 +152,36 @@ pthread_barrier_wait:
5: orl $-1, %eax /* == PTHREAD_BARRIER_SERIAL_THREAD */
popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
ret
-1: leal MUTEX(%ebx), %ecx
- call __lll_mutex_lock_wait
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%ebx, -8)
+1: movl PRIVATE(%ebx), %ecx
+ leal MUTEX(%ebx), %edx
+ xorl $LLL_SHARED, %ecx
+ call __lll_lock_wait
jmp 2b
-4: leal MUTEX(%ebx), %eax
- call __lll_mutex_unlock_wake
+4: movl PRIVATE(%ebx), %ecx
+ leal MUTEX(%ebx), %eax
+ xorl $LLL_SHARED, %ecx
+ call __lll_unlock_wake
jmp 5b
-6: leal MUTEX(%ebx), %eax
- call __lll_mutex_unlock_wake
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%esi, -12)
+6: movl PRIVATE(%ebx), %ecx
+ leal MUTEX(%ebx), %eax
+ xorl $LLL_SHARED, %ecx
+ call __lll_unlock_wake
jmp 7b
-9: leal MUTEX(%ebx), %eax
- call __lll_mutex_unlock_wake
+9: movl PRIVATE(%ebx), %ecx
+ leal MUTEX(%ebx), %eax
+ xorl $LLL_SHARED, %ecx
+ call __lll_unlock_wake
jmp 10b
+ cfi_endproc
.size pthread_barrier_wait,.-pthread_barrier_wait
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S
index 6e8ffe6f6..669b96a95 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002,2003,2004,2006,2007,2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,22 +18,11 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <bits/kernel-features.h>
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_REQUEUE 3
-#define FUTEX_CMP_REQUEUE 4
-
-#define EINVAL 22
-
+#include <pthread-pi-defines.h>
+#include <pthread-errnos.h>
.text
@@ -42,11 +31,20 @@
.type __pthread_cond_broadcast, @function
.align 16
__pthread_cond_broadcast:
-
+ cfi_startproc
pushl %ebx
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%ebx, 0)
pushl %esi
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%esi, 0)
pushl %edi
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%edi, 0)
pushl %ebp
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%ebp, 0)
+ cfi_remember_state
movl 20(%esp), %ebx
@@ -92,8 +90,24 @@ __pthread_cond_broadcast:
8: cmpl $-1, %edi
je 9f
+ /* Do not use requeue for pshared condvars. */
+ testl $PS_BIT, MUTEX_KIND(%edi)
+ jne 9f
+
+ /* Requeue to a non-robust PI mutex if the PI bit is set and
+ the robust bit is not set. */
+ movl MUTEX_KIND(%edi), %eax
+ andl $(ROBUST_BIT|PI_BIT), %eax
+ cmpl $PI_BIT, %eax
+ je 81f
+
/* Wake up all threads. */
- movl $FUTEX_CMP_REQUEUE, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $(FUTEX_CMP_REQUEUE|FUTEX_PRIVATE_FLAG), %ecx
+#else
+ movl %gs:PRIVATE_FUTEX, %ecx
+ orl $FUTEX_CMP_REQUEUE, %ecx
+#endif
movl $SYS_futex, %eax
movl $0x7fffffff, %esi
movl $1, %edx
@@ -111,51 +125,113 @@ __pthread_cond_broadcast:
cmpl $0xfffff001, %eax
jae 9f
-10: xorl %eax, %eax
+6: xorl %eax, %eax
popl %ebp
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebp)
popl %edi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edi)
popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
ret
- .align 16
- /* Unlock. */
-4: LOCK
- subl $1, cond_lock-cond_futex(%ebx)
- jne 5f
+ cfi_restore_state
-6: xorl %eax, %eax
- popl %ebp
- popl %edi
- popl %esi
- popl %ebx
- ret
+81: movl $(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx
+ movl $SYS_futex, %eax
+ movl $0x7fffffff, %esi
+ movl $1, %edx
+ /* Get the address of the futex involved. */
+# if MUTEX_FUTEX != 0
+ addl $MUTEX_FUTEX, %edi
+# endif
+ int $0x80
+
+ /* For any kind of error, which mainly is EAGAIN, we try again
+ with WAKE. The general test also covers running on old
+ kernels. */
+ cmpl $0xfffff001, %eax
+ jb 6b
+ jmp 9f
/* Initial locking failed. */
1:
#if cond_lock == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal cond_lock(%ebx), %ecx
+ leal cond_lock(%ebx), %edx
#endif
- call __lll_mutex_lock_wait
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
+#endif
+ call __lll_lock_wait
jmp 2b
- /* Unlock in loop requires waekup. */
+ .align 16
+ /* Unlock. */
+4: LOCK
+ subl $1, cond_lock-cond_futex(%ebx)
+ je 6b
+
+ /* Unlock in loop requires wakeup. */
5: leal cond_lock-cond_futex(%ebx), %eax
- call __lll_mutex_unlock_wake
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex-cond_futex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
+#endif
+ call __lll_unlock_wake
jmp 6b
- /* Unlock in loop requires waekup. */
+ /* Unlock in loop requires wakeup. */
7: leal cond_lock-cond_futex(%ebx), %eax
- call __lll_mutex_unlock_wake
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex-cond_futex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
+#endif
+ call __lll_unlock_wake
jmp 8b
9: /* The futex requeue functionality is not available. */
movl $0x7fffffff, %edx
- movl $FUTEX_WAKE, %ecx
+#if FUTEX_PRIVATE_FLAG > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex-cond_futex(%ebx)
+ sete %cl
+ subl $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ andl $FUTEX_PRIVATE_FLAG, %ecx
+#else
+ andl %gs:PRIVATE_FUTEX, %ecx
+#endif
+ addl $FUTEX_WAKE, %ecx
movl $SYS_futex, %eax
ENTER_KERNEL
- jmp 10b
+ jmp 6b
+ cfi_endproc
.size __pthread_cond_broadcast, .-__pthread_cond_broadcast
weak_alias(__pthread_cond_broadcast, pthread_cond_broadcast)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S
index ec8217987..54e80d059 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002,2003,2004,2005,2007,2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,20 +18,11 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <bits/kernel-features.h>
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_REQUEUE 3
-
-#define EINVAL 22
+#include <pthread-pi-defines.h>
+#include <pthread-errnos.h>
.text
@@ -42,8 +33,14 @@
.align 16
__pthread_cond_signal:
+ cfi_startproc
pushl %ebx
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%ebx, 0)
pushl %edi
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%edi, 0)
+ cfi_remember_state
movl 12(%esp), %edi
@@ -77,35 +74,141 @@ __pthread_cond_signal:
addl $1, (%ebx)
/* Wake up one thread. */
- movl $FUTEX_WAKE, %ecx
+ pushl %esi
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%esi, 0)
+ pushl %ebp
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%ebp, 0)
+
+#if FUTEX_PRIVATE_FLAG > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex-cond_futex(%ebx)
+ sete %cl
+ je 8f
+
+ movl dep_mutex-cond_futex(%ebx), %edx
+ /* Requeue to a non-robust PI mutex if the PI bit is set and
+ the robust bit is not set. */
+ movl MUTEX_KIND(%edx), %eax
+ andl $(ROBUST_BIT|PI_BIT), %eax
+ cmpl $PI_BIT, %eax
+ je 9f
+
+8: subl $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ andl $FUTEX_PRIVATE_FLAG, %ecx
+#else
+ andl %gs:PRIVATE_FUTEX, %ecx
+#endif
+ addl $FUTEX_WAKE_OP, %ecx
movl $SYS_futex, %eax
movl $1, %edx
+ movl $1, %esi
+ movl $FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, %ebp
+ /* FIXME: Until Ingo fixes 4G/4G vDSO, 6 arg syscalls are broken for
+ sysenter.
+ ENTER_KERNEL */
+ int $0x80
+ popl %ebp
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebp)
+ popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
+
+ /* For any kind of error, we try again with WAKE.
+ The general test also covers running on old kernels. */
+ cmpl $-4095, %eax
+ jae 7f
+
+6: xorl %eax, %eax
+ popl %edi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edi)
+ popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
+ ret
+
+ cfi_restore_state
+
+9: movl $(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx
+ movl $SYS_futex, %eax
+ movl $1, %edx
+ xorl %esi, %esi
+ movl dep_mutex-cond_futex(%ebx), %edi
+ movl (%ebx), %ebp
+ /* FIXME: Until Ingo fixes 4G/4G vDSO, 6 arg syscalls are broken for
+ sysenter.
+ ENTER_KERNEL */
+ int $0x80
+ popl %ebp
+ popl %esi
+
+ leal -cond_futex(%ebx), %edi
+
+ /* For any kind of error, we try again with WAKE.
+ The general test also covers running on old kernels. */
+ cmpl $-4095, %eax
+ jb 4f
+
+7:
+#ifdef __ASSUME_PRIVATE_FUTEX
+ andl $FUTEX_PRIVATE_FLAG, %ecx
+#else
+ andl %gs:PRIVATE_FUTEX, %ecx
+#endif
+ orl $FUTEX_WAKE, %ecx
+
+ xorl $(FUTEX_WAKE ^ FUTEX_WAKE_OP), %ecx
+ movl $SYS_futex, %eax
+ /* %edx should be 1 already from $FUTEX_WAKE_OP syscall.
+ movl $1, %edx */
ENTER_KERNEL
/* Unlock. Note that at this point %edi always points to
cond_lock. */
4: LOCK
subl $1, (%edi)
- jne 5f
+ je 6b
-6: xorl %eax, %eax
- popl %edi
- popl %ebx
- ret
+ /* Unlock in loop requires wakeup. */
+5: movl %edi, %eax
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex-cond_futex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
+#endif
+ call __lll_unlock_wake
+ jmp 6b
/* Initial locking failed. */
1:
#if cond_lock == 0
- movl %edi, %ecx
+ movl %edi, %edx
#else
- leal cond_lock(%edi), %ecx
+ leal cond_lock(%edi), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
#endif
- call __lll_mutex_lock_wait
+ cmpl $-1, dep_mutex(%edi)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
+#endif
+ call __lll_lock_wait
jmp 2b
- /* Unlock in loop requires wakeup. */
-5: movl %edi, %eax
- call __lll_mutex_unlock_wake
- jmp 6b
+ cfi_endproc
.size __pthread_cond_signal, .-__pthread_cond_signal
weak_alias(__pthread_cond_signal, pthread_cond_signal)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S
index b8f0d2e4b..c56dd7716 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2004,2006-2007,2009,2010 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,17 +18,11 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <pthread-errnos.h>
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
+#include <pthread-pi-defines.h>
+#include <bits/kernel-features.h>
.text
@@ -40,14 +34,28 @@
.align 16
__pthread_cond_timedwait:
.LSTARTCODE:
+ cfi_startproc
+#ifdef SHARED
+ cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+ DW.ref.__gcc_personality_v0)
+ cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+ cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
+ cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
+#endif
+
pushl %ebp
-.Lpush_ebp:
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%ebp, 0)
pushl %edi
-.Lpush_edi:
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%edi, 0)
pushl %esi
-.Lpush_esi:
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%esi, 0)
pushl %ebx
-.Lpush_ebx:
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%ebx, 0)
movl 20(%esp), %ebx
movl 28(%esp), %ebp
@@ -84,11 +92,12 @@ __pthread_cond_timedwait:
addl $1, total_seq(%ebx)
adcl $0, total_seq+4(%ebx)
addl $1, cond_futex(%ebx)
- addl $(1 << clock_bits), cond_nwaiters(%ebx)
+ addl $(1 << nwaiters_shift), cond_nwaiters(%ebx)
-#define FRAME_SIZE 24
+#define FRAME_SIZE 32
subl $FRAME_SIZE, %esp
-.Lsubl:
+ cfi_adjust_cfa_offset(FRAME_SIZE)
+ cfi_remember_state
/* Get and store current wakeup_seq value. */
movl wakeup_seq(%ebx), %edi
@@ -98,12 +107,14 @@ __pthread_cond_timedwait:
movl %edx, 16(%esp)
movl %eax, 20(%esp)
+ /* Reset the pi-requeued flag. */
+8: movl $0, 24(%esp)
/* Get the current time. */
-8: movl %ebx, %edx
+ movl %ebx, %edx
#ifdef __NR_clock_gettime
/* Get the clock number. */
movl cond_nwaiters(%ebx), %ebx
- andl $((1 << clock_bits) - 1), %ebx
+ andl $((1 << nwaiters_shift) - 1), %ebx
/* Only clocks 0 and 1 are allowed so far. Both are handled in the
kernel. */
leal 4(%esp), %ecx
@@ -124,7 +135,7 @@ __pthread_cond_timedwait:
/* Get the current time. */
leal 4(%esp), %ebx
xorl %ecx, %ecx
- movl $SYS_gettimeofday, %eax
+ movl $__NR_gettimeofday, %eax
ENTER_KERNEL
movl %edx, %ebx
@@ -149,6 +160,7 @@ __pthread_cond_timedwait:
movl %edx, 8(%esp)
movl cond_futex(%ebx), %edi
+ movl %edi, 28(%esp)
/* Unlock. */
LOCK
@@ -163,9 +175,60 @@ __pthread_cond_timedwait:
4: call __pthread_enable_asynccancel
movl %eax, (%esp)
+#if FUTEX_PRIVATE_FLAG > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex(%ebx)
+ sete %cl
+ je 40f
+
+ movl dep_mutex(%ebx), %edi
+ /* Requeue to a non-robust PI mutex if the PI bit is set and
+ the robust bit is not set. */
+ movl MUTEX_KIND(%edi), %eax
+ andl $(ROBUST_BIT|PI_BIT), %eax
+ cmpl $PI_BIT, %eax
+ jne 40f
+
+ movl $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx
+ /* The following only works like this because we only support
+ two clocks, represented using a single bit. */
+ testl $1, cond_nwaiters(%ebx)
+ /* XXX Need to implement using sete instead of a jump. */
+ jne 42f
+ orl $FUTEX_CLOCK_REALTIME, %ecx
+
+ /* Requeue-PI uses absolute timeout */
+42: leal (%ebp), %esi
+ movl 28(%esp), %edx
+ addl $cond_futex, %ebx
+ movl $SYS_futex, %eax
+ ENTER_KERNEL
+ subl $cond_futex, %ebx
+ movl %eax, %esi
+ /* Set the pi-requeued flag only if the kernel has returned 0. The
+ kernel does not hold the mutex on ETIMEDOUT or any other error. */
+ cmpl $0, %eax
+ sete 24(%esp)
+ je 41f
+
+ /* Normal and PI futexes dont mix. Use normal futex functions only
+ if the kernel does not support the PI futex functions. */
+ cmpl $-ENOSYS, %eax
+ jne 41f
+ xorl %ecx, %ecx
+
+40: subl $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ andl $FUTEX_PRIVATE_FLAG, %ecx
+#else
+ andl %gs:PRIVATE_FUTEX, %ecx
+#endif
+#if FUTEX_WAIT != 0
+ addl $FUTEX_WAIT, %ecx
+#endif
leal 4(%esp), %esi
- xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
- movl %edi, %edx
+ movl 28(%esp), %edx
addl $cond_futex, %ebx
.Ladd_cond_futex:
movl $SYS_futex, %eax
@@ -174,7 +237,7 @@ __pthread_cond_timedwait:
.Lsub_cond_futex:
movl %eax, %esi
- movl (%esp), %eax
+41: movl (%esp), %eax
call __pthread_disable_asynccancel
.LcleanupEND:
@@ -225,7 +288,7 @@ __pthread_cond_timedwait:
14: addl $1, woken_seq(%ebx)
adcl $0, woken_seq+4(%ebx)
-24: subl $(1 << clock_bits), cond_nwaiters(%ebx)
+24: subl $(1 << nwaiters_shift), cond_nwaiters(%ebx)
/* Wake up a thread which wants to destroy the condvar object. */
movl total_seq(%ebx), %eax
@@ -233,12 +296,23 @@ __pthread_cond_timedwait:
cmpl $0xffffffff, %eax
jne 25f
movl cond_nwaiters(%ebx), %eax
- andl $~((1 << clock_bits) - 1), %eax
+ andl $~((1 << nwaiters_shift) - 1), %eax
jne 25f
addl $cond_nwaiters, %ebx
movl $SYS_futex, %eax
- movl $FUTEX_WAKE, %ecx
+#if FUTEX_PRIVATE_FLAG > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex-cond_nwaiters(%ebx)
+ sete %cl
+ subl $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ andl $FUTEX_PRIVATE_FLAG, %ecx
+#else
+ andl %gs:PRIVATE_FUTEX, %ecx
+#endif
+ addl $FUTEX_WAKE, %ecx
movl $1, %edx
ENTER_KERNEL
subl $cond_nwaiters, %ebx
@@ -251,11 +325,15 @@ __pthread_cond_timedwait:
#endif
jne 10f
- /* Remove cancellation handler. */
11: movl 24+FRAME_SIZE(%esp), %eax
+ /* With requeue_pi, the mutex lock is held in the kernel. */
+ movl 24(%esp), %ecx
+ testl %ecx, %ecx
+ jnz 27f
+
call __pthread_mutex_cond_lock
- addl $FRAME_SIZE, %esp
-.Laddl:
+26: addl $FRAME_SIZE, %esp
+ cfi_adjust_cfa_offset(-FRAME_SIZE);
/* We return the result of the mutex_lock operation if it failed. */
testl %eax, %eax
@@ -268,46 +346,118 @@ __pthread_cond_timedwait:
#endif
18: popl %ebx
-.Lpop_ebx:
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
popl %esi
-.Lpop_esi:
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
popl %edi
-.Lpop_edi:
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edi)
popl %ebp
-.Lpop_ebp:
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebp)
ret
+ cfi_restore_state
+
+27: call __pthread_mutex_cond_lock_adjust
+ xorl %eax, %eax
+ jmp 26b
+
+ cfi_adjust_cfa_offset(-FRAME_SIZE);
/* Initial locking failed. */
1:
-.LSbl1:
#if cond_lock == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal cond_lock(%ebx), %ecx
+ leal cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
#endif
- call __lll_mutex_lock_wait
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
+#endif
+ call __lll_lock_wait
jmp 2b
+ /* The initial unlocking of the mutex failed. */
+16:
+ LOCK
+#if cond_lock == 0
+ subl $1, (%ebx)
+#else
+ subl $1, cond_lock(%ebx)
+#endif
+ jne 18b
+
+ movl %eax, %esi
+#if cond_lock == 0
+ movl %ebx, %eax
+#else
+ leal cond_lock(%ebx), %eax
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
+#endif
+ call __lll_unlock_wake
+
+ movl %esi, %eax
+ jmp 18b
+
+ cfi_adjust_cfa_offset(FRAME_SIZE)
+
/* Unlock in loop requires wakeup. */
3:
-.LSbl2:
#if cond_lock == 0
movl %ebx, %eax
#else
leal cond_lock(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
+#endif
+ call __lll_unlock_wake
jmp 4b
/* Locking in loop failed. */
5:
#if cond_lock == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal cond_lock(%ebx), %ecx
+ leal cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
#endif
- call __lll_mutex_lock_wait
+ call __lll_lock_wait
jmp 6b
/* Unlock after loop requires wakeup. */
@@ -317,37 +467,24 @@ __pthread_cond_timedwait:
#else
leal cond_lock(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
- jmp 11b
-
- /* The initial unlocking of the mutex failed. */
-16:
-.LSbl3:
- LOCK
-#if cond_lock == 0
- subl $1, (%ebx)
-#else
- subl $1, cond_lock(%ebx)
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
#endif
- jne 18b
-
- movl %eax, %esi
-#if cond_lock == 0
- movl %ebx, %eax
-#else
- leal cond_lock(%ebx), %eax
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
#endif
- call __lll_mutex_unlock_wake
-
- movl %esi, %eax
- jmp 18b
+ call __lll_unlock_wake
+ jmp 11b
#if defined __NR_clock_gettime && !defined __ASSUME_POSIX_TIMERS
/* clock_gettime not available. */
-.LSbl4:
19: leal 4(%esp), %ebx
xorl %ecx, %ecx
- movl $SYS_gettimeofday, %eax
+ movl $__NR_gettimeofday, %eax
ENTER_KERNEL
movl %edx, %ebx
@@ -374,7 +511,6 @@ weak_alias(__pthread_cond_timedwait, pthread_cond_timedwait)
.type __condvar_tw_cleanup2, @function
__condvar_tw_cleanup2:
subl $cond_futex, %ebx
-.LSbl5:
.size __condvar_tw_cleanup2, .-__condvar_tw_cleanup2
.type __condvar_tw_cleanup, @function
__condvar_tw_cleanup:
@@ -392,25 +528,45 @@ __condvar_tw_cleanup:
jz 1f
#if cond_lock == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal cond_lock(%ebx), %ecx
+ leal cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
#endif
- call __lll_mutex_lock_wait
+ call __lll_lock_wait
1: movl broadcast_seq(%ebx), %eax
cmpl 20(%esp), %eax
jne 3f
- addl $1, wakeup_seq(%ebx)
+ /* We increment the wakeup_seq counter only if it is lower than
+ total_seq. If this is not the case the thread was woken and
+ then canceled. In this case we ignore the signal. */
+ movl total_seq(%ebx), %eax
+ movl total_seq+4(%ebx), %edi
+ cmpl wakeup_seq+4(%ebx), %edi
+ jb 6f
+ ja 7f
+ cmpl wakeup_seq(%ebx), %eax
+ jbe 7f
+
+6: addl $1, wakeup_seq(%ebx)
adcl $0, wakeup_seq+4(%ebx)
-
addl $1, cond_futex(%ebx)
- addl $1, woken_seq(%ebx)
+7: addl $1, woken_seq(%ebx)
adcl $0, woken_seq+4(%ebx)
-3: subl $(1 << clock_bits), cond_nwaiters(%ebx)
+3: subl $(1 << nwaiters_shift), cond_nwaiters(%ebx)
/* Wake up a thread which wants to destroy the condvar object. */
xorl %edi, %edi
@@ -419,12 +575,23 @@ __condvar_tw_cleanup:
cmpl $0xffffffff, %eax
jne 4f
movl cond_nwaiters(%ebx), %eax
- andl $~((1 << clock_bits) - 1), %eax
+ andl $~((1 << nwaiters_shift) - 1), %eax
jne 4f
addl $cond_nwaiters, %ebx
movl $SYS_futex, %eax
- movl $FUTEX_WAKE, %ecx
+#if FUTEX_PRIVATE_FLAG > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex-cond_nwaiters(%ebx)
+ sete %cl
+ subl $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ andl $FUTEX_PRIVATE_FLAG, %ecx
+#else
+ andl %gs:PRIVATE_FUTEX, %ecx
+#endif
+ addl $FUTEX_WAKE, %ecx
movl $1, %edx
ENTER_KERNEL
subl $cond_nwaiters, %ebx
@@ -443,13 +610,34 @@ __condvar_tw_cleanup:
#else
leal cond_lock(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
+#endif
+ call __lll_unlock_wake
/* Wake up all waiters to make sure no signal gets lost. */
2: testl %edi, %edi
jnz 5f
addl $cond_futex, %ebx
- movl $FUTEX_WAKE, %ecx
+#if FUTEX_PRIVATE_FLAG > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex-cond_futex(%ebx)
+ sete %cl
+ subl $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ andl $FUTEX_PRIVATE_FLAG, %ecx
+#else
+ andl %gs:PRIVATE_FUTEX, %ecx
+#endif
+ addl $FUTEX_WAKE, %ecx
movl $SYS_futex, %eax
movl $0x7fffffff, %edx
ENTER_KERNEL
@@ -462,4 +650,44 @@ __condvar_tw_cleanup:
call _Unwind_Resume
hlt
.LENDCODE:
+ cfi_endproc
.size __condvar_tw_cleanup, .-__condvar_tw_cleanup
+
+
+ .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+ .byte DW_EH_PE_omit # @LPStart format (omit)
+ .byte DW_EH_PE_omit # @TType format (omit)
+ .byte DW_EH_PE_sdata4 # call-site format
+ # DW_EH_PE_sdata4
+ .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+ .long .LcleanupSTART-.LSTARTCODE
+ .long .Ladd_cond_futex-.LcleanupSTART
+ .long __condvar_tw_cleanup-.LSTARTCODE
+ .uleb128 0
+ .long .Ladd_cond_futex-.LSTARTCODE
+ .long .Lsub_cond_futex-.Ladd_cond_futex
+ .long __condvar_tw_cleanup2-.LSTARTCODE
+ .uleb128 0
+ .long .Lsub_cond_futex-.LSTARTCODE
+ .long .LcleanupEND-.Lsub_cond_futex
+ .long __condvar_tw_cleanup-.LSTARTCODE
+ .uleb128 0
+ .long .LcallUR-.LSTARTCODE
+ .long .LENDCODE-.LcallUR
+ .long 0
+ .uleb128 0
+.Lcstend:
+
+
+#ifdef SHARED
+ .hidden DW.ref.__gcc_personality_v0
+ .weak DW.ref.__gcc_personality_v0
+ .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+ .align 4
+ .type DW.ref.__gcc_personality_v0, @object
+ .size DW.ref.__gcc_personality_v0, 4
+DW.ref.__gcc_personality_v0:
+ .long __gcc_personality_v0
+#endif
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S
index 377a7340f..ab4ef0a45 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2004,2006-2007,2009,2010 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,17 +18,12 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <tcb-offsets.h>
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
+#include <pthread-errnos.h>
+#include <pthread-pi-defines.h>
+#include <bits/kernel-features.h>
.text
@@ -39,16 +34,31 @@
.align 16
__pthread_cond_wait:
.LSTARTCODE:
+ cfi_startproc
+#ifdef SHARED
+ cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+ DW.ref.__gcc_personality_v0)
+ cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+ cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
+ cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
+#endif
+ pushl %ebp
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%ebp, 0)
pushl %edi
-.Lpush_edi:
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%edi, 0)
pushl %esi
-.Lpush_esi:
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%esi, 0)
pushl %ebx
-.Lpush_ebx:
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%ebx, 0)
xorl %esi, %esi
- movl 16(%esp), %ebx
+ movl 20(%esp), %ebx
/* Get internal lock. */
movl $1, %edx
@@ -64,7 +74,7 @@ __pthread_cond_wait:
/* Store the reference to the mutex. If there is already a
different value in there this is a bad user bug. */
2: cmpl $-1, dep_mutex(%ebx)
- movl 20(%esp), %eax
+ movl 24(%esp), %eax
je 15f
movl %eax, dep_mutex(%ebx)
@@ -78,11 +88,12 @@ __pthread_cond_wait:
addl $1, total_seq(%ebx)
adcl $0, total_seq+4(%ebx)
addl $1, cond_futex(%ebx)
- addl $(1 << clock_bits), cond_nwaiters(%ebx)
+ addl $(1 << nwaiters_shift), cond_nwaiters(%ebx)
-#define FRAME_SIZE 16
+#define FRAME_SIZE 20
subl $FRAME_SIZE, %esp
-.Lsubl:
+ cfi_adjust_cfa_offset(FRAME_SIZE)
+ cfi_remember_state
/* Get and store current wakeup_seq value. */
movl wakeup_seq(%ebx), %edi
@@ -92,7 +103,9 @@ __pthread_cond_wait:
movl %edx, 8(%esp)
movl %eax, 12(%esp)
-8: movl cond_futex(%ebx), %edi
+ /* Reset the pi-requeued flag. */
+8: movl $0, 16(%esp)
+ movl cond_futex(%ebx), %ebp
/* Unlock. */
LOCK
@@ -107,8 +120,48 @@ __pthread_cond_wait:
4: call __pthread_enable_asynccancel
movl %eax, (%esp)
- movl %esi, %ecx /* movl $FUTEX_WAIT, %ecx */
- movl %edi, %edx
+ xorl %ecx, %ecx
+ cmpl $-1, dep_mutex(%ebx)
+ sete %cl
+ je 18f
+
+ movl dep_mutex(%ebx), %edi
+ /* Requeue to a non-robust PI mutex if the PI bit is set and
+ the robust bit is not set. */
+ movl MUTEX_KIND(%edi), %eax
+ andl $(ROBUST_BIT|PI_BIT), %eax
+ cmpl $PI_BIT, %eax
+ jne 18f
+
+ movl $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx
+ movl %ebp, %edx
+ xorl %esi, %esi
+ addl $cond_futex, %ebx
+ movl $SYS_futex, %eax
+ ENTER_KERNEL
+ subl $cond_futex, %ebx
+ /* Set the pi-requeued flag only if the kernel has returned 0. The
+ kernel does not hold the mutex on error. */
+ cmpl $0, %eax
+ sete 16(%esp)
+ je 19f
+
+ /* Normal and PI futexes dont mix. Use normal futex functions only
+ if the kernel does not support the PI futex functions. */
+ cmpl $-ENOSYS, %eax
+ jne 19f
+ xorl %ecx, %ecx
+
+18: subl $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ andl $FUTEX_PRIVATE_FLAG, %ecx
+#else
+ andl %gs:PRIVATE_FUTEX, %ecx
+#endif
+#if FUTEX_WAIT != 0
+ addl $FUTEX_WAIT, %ecx
+#endif
+ movl %ebp, %edx
addl $cond_futex, %ebx
.Ladd_cond_futex:
movl $SYS_futex, %eax
@@ -116,7 +169,7 @@ __pthread_cond_wait:
subl $cond_futex, %ebx
.Lsub_cond_futex:
- movl (%esp), %eax
+19: movl (%esp), %eax
call __pthread_disable_asynccancel
.LcleanupEND:
@@ -155,7 +208,7 @@ __pthread_cond_wait:
adcl $0, woken_seq+4(%ebx)
/* Unlock */
-16: subl $(1 << clock_bits), cond_nwaiters(%ebx)
+16: subl $(1 << nwaiters_shift), cond_nwaiters(%ebx)
/* Wake up a thread which wants to destroy the condvar object. */
movl total_seq(%ebx), %eax
@@ -163,12 +216,23 @@ __pthread_cond_wait:
cmpl $0xffffffff, %eax
jne 17f
movl cond_nwaiters(%ebx), %eax
- andl $~((1 << clock_bits) - 1), %eax
+ andl $~((1 << nwaiters_shift) - 1), %eax
jne 17f
addl $cond_nwaiters, %ebx
movl $SYS_futex, %eax
- movl $FUTEX_WAKE, %ecx
+#if FUTEX_PRIVATE_FLAG > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex-cond_nwaiters(%ebx)
+ sete %cl
+ subl $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ andl $FUTEX_PRIVATE_FLAG, %ecx
+#else
+ andl %gs:PRIVATE_FUTEX, %ecx
+#endif
+ addl $FUTEX_WAKE, %ecx
movl $1, %edx
ENTER_KERNEL
subl $cond_nwaiters, %ebx
@@ -181,51 +245,130 @@ __pthread_cond_wait:
#endif
jne 10f
-11: movl 20+FRAME_SIZE(%esp), %eax
+ /* With requeue_pi, the mutex lock is held in the kernel. */
+11: movl 24+FRAME_SIZE(%esp), %eax
+ movl 16(%esp), %ecx
+ testl %ecx, %ecx
+ jnz 21f
+
call __pthread_mutex_cond_lock
- addl $FRAME_SIZE, %esp
-.Laddl:
+20: addl $FRAME_SIZE, %esp
+ cfi_adjust_cfa_offset(-FRAME_SIZE);
14: popl %ebx
-.Lpop_ebx:
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
popl %esi
-.Lpop_esi:
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
popl %edi
-.Lpop_edi:
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edi)
+ popl %ebp
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebp)
/* We return the result of the mutex_lock operation. */
ret
+ cfi_restore_state
+
+21: call __pthread_mutex_cond_lock_adjust
+ xorl %eax, %eax
+ jmp 20b
+
+ cfi_adjust_cfa_offset(-FRAME_SIZE);
/* Initial locking failed. */
1:
-.LSbl1:
#if cond_lock == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal cond_lock(%ebx), %ecx
+ leal cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
#endif
- call __lll_mutex_lock_wait
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
+#endif
+ call __lll_lock_wait
jmp 2b
- /* Unlock in loop requires waekup. */
+ /* The initial unlocking of the mutex failed. */
+12:
+ LOCK
+#if cond_lock == 0
+ subl $1, (%ebx)
+#else
+ subl $1, cond_lock(%ebx)
+#endif
+ jne 14b
+
+ movl %eax, %esi
+#if cond_lock == 0
+ movl %ebx, %eax
+#else
+ leal cond_lock(%ebx), %eax
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
+#endif
+ call __lll_unlock_wake
+
+ movl %esi, %eax
+ jmp 14b
+
+ cfi_adjust_cfa_offset(FRAME_SIZE)
+
+ /* Unlock in loop requires wakeup. */
3:
-.LSbl2:
#if cond_lock == 0
movl %ebx, %eax
#else
leal cond_lock(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
+#endif
+ call __lll_unlock_wake
jmp 4b
/* Locking in loop failed. */
5:
#if cond_lock == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal cond_lock(%ebx), %ecx
+ leal cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
#endif
- call __lll_mutex_lock_wait
+ call __lll_lock_wait
jmp 6b
/* Unlock after loop requires wakeup. */
@@ -235,30 +378,18 @@ __pthread_cond_wait:
#else
leal cond_lock(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
- jmp 11b
-
- /* The initial unlocking of the mutex failed. */
-12:
-.LSbl3:
- LOCK
-#if cond_lock == 0
- subl $1, (%ebx)
-#else
- subl $1, cond_lock(%ebx)
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
#endif
- jne 14b
-
- movl %eax, %esi
-#if cond_lock == 0
- movl %ebx, %eax
-#else
- leal cond_lock(%ebx), %eax
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
#endif
- call __lll_mutex_unlock_wake
-
- movl %esi, %eax
- jmp 14b
+ call __lll_unlock_wake
+ jmp 11b
.size __pthread_cond_wait, .-__pthread_cond_wait
weak_alias(__pthread_cond_wait, pthread_cond_wait)
@@ -284,25 +415,45 @@ __condvar_w_cleanup:
jz 1f
#if cond_lock == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal cond_lock(%ebx), %ecx
+ leal cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
#endif
- call __lll_mutex_lock_wait
+ call __lll_lock_wait
1: movl broadcast_seq(%ebx), %eax
cmpl 12(%esp), %eax
jne 3f
- addl $1, wakeup_seq(%ebx)
+ /* We increment the wakeup_seq counter only if it is lower than
+ total_seq. If this is not the case the thread was woken and
+ then canceled. In this case we ignore the signal. */
+ movl total_seq(%ebx), %eax
+ movl total_seq+4(%ebx), %edi
+ cmpl wakeup_seq+4(%ebx), %edi
+ jb 6f
+ ja 7f
+ cmpl wakeup_seq(%ebx), %eax
+ jbe 7f
+
+6: addl $1, wakeup_seq(%ebx)
adcl $0, wakeup_seq+4(%ebx)
-
addl $1, cond_futex(%ebx)
- addl $1, woken_seq(%ebx)
+7: addl $1, woken_seq(%ebx)
adcl $0, woken_seq+4(%ebx)
-3: subl $(1 << clock_bits), cond_nwaiters(%ebx)
+3: subl $(1 << nwaiters_shift), cond_nwaiters(%ebx)
/* Wake up a thread which wants to destroy the condvar object. */
xorl %edi, %edi
@@ -311,12 +462,23 @@ __condvar_w_cleanup:
cmpl $0xffffffff, %eax
jne 4f
movl cond_nwaiters(%ebx), %eax
- andl $~((1 << clock_bits) - 1), %eax
+ andl $~((1 << nwaiters_shift) - 1), %eax
jne 4f
addl $cond_nwaiters, %ebx
movl $SYS_futex, %eax
- movl $FUTEX_WAKE, %ecx
+#if FUTEX_PRIVATE_FLAG > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex-cond_nwaiters(%ebx)
+ sete %cl
+ subl $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ andl $FUTEX_PRIVATE_FLAG, %ecx
+#else
+ andl %gs:PRIVATE_FUTEX, %ecx
+#endif
+ addl $FUTEX_WAKE, %ecx
movl $1, %edx
ENTER_KERNEL
subl $cond_nwaiters, %ebx
@@ -335,18 +497,39 @@ __condvar_w_cleanup:
#else
leal cond_lock(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
+#endif
+ call __lll_unlock_wake
/* Wake up all waiters to make sure no signal gets lost. */
2: testl %edi, %edi
jnz 5f
addl $cond_futex, %ebx
- movl $FUTEX_WAKE, %ecx
+#if FUTEX_PRIVATE_FLAG > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex-cond_futex(%ebx)
+ sete %cl
+ subl $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ andl $FUTEX_PRIVATE_FLAG, %ecx
+#else
+ andl %gs:PRIVATE_FUTEX, %ecx
+#endif
+ addl $FUTEX_WAKE, %ecx
movl $SYS_futex, %eax
movl $0x7fffffff, %edx
ENTER_KERNEL
-5: movl 20+FRAME_SIZE(%esp), %eax
+5: movl 24+FRAME_SIZE(%esp), %eax
call __pthread_mutex_cond_lock
movl %esi, (%esp)
@@ -354,4 +537,54 @@ __condvar_w_cleanup:
call _Unwind_Resume
hlt
.LENDCODE:
+ cfi_endproc
.size __condvar_w_cleanup, .-__condvar_w_cleanup
+
+
+ .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+ .byte DW_EH_PE_omit # @LPStart format (omit)
+ .byte DW_EH_PE_omit # @TType format (omit)
+ .byte DW_EH_PE_sdata4 # call-site format
+ # DW_EH_PE_sdata4
+ .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+ .long .LcleanupSTART-.LSTARTCODE
+ .long .Ladd_cond_futex-.LcleanupSTART
+ .long __condvar_w_cleanup-.LSTARTCODE
+ .uleb128 0
+ .long .Ladd_cond_futex-.LSTARTCODE
+ .long .Lsub_cond_futex-.Ladd_cond_futex
+ .long __condvar_w_cleanup2-.LSTARTCODE
+ .uleb128 0
+ .long .Lsub_cond_futex-.LSTARTCODE
+ .long .LcleanupEND-.Lsub_cond_futex
+ .long __condvar_w_cleanup-.LSTARTCODE
+ .uleb128 0
+ .long .LcallUR-.LSTARTCODE
+ .long .LENDCODE-.LcallUR
+ .long 0
+ .uleb128 0
+.Lcstend:
+
+#ifdef PIC
+ .section .gnu.linkonce.t.__i686.get_pc_thunk.cx,"ax",@progbits
+ .globl __i686.get_pc_thunk.cx
+ .hidden __i686.get_pc_thunk.cx
+ .type __i686.get_pc_thunk.cx,@function
+__i686.get_pc_thunk.cx:
+ movl (%esp), %ecx;
+ ret
+ .size __i686.get_pc_thunk.cx,.-__i686.get_pc_thunk.cx
+#endif
+
+#ifdef SHARED
+ .hidden DW.ref.__gcc_personality_v0
+ .weak DW.ref.__gcc_personality_v0
+ .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+ .align 4
+ .type DW.ref.__gcc_personality_v0, @object
+ .size DW.ref.__gcc_personality_v0, 4
+DW.ref.__gcc_personality_v0:
+ .long __gcc_personality_v0
+#endif
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S
index aec79f07e..d181393e6 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,19 +18,10 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
-#include <tcb-offsets.h>
-
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
+#include <bits/kernel-features.h>
.text
@@ -39,8 +30,13 @@
.type __pthread_rwlock_rdlock,@function
.align 16
__pthread_rwlock_rdlock:
+ cfi_startproc
pushl %esi
+ cfi_adjust_cfa_offset(4)
pushl %ebx
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%esi, -8)
+ cfi_offset(%ebx, -12)
xorl %esi, %esi
movl 12(%esp), %ebx
@@ -61,7 +57,7 @@ __pthread_rwlock_rdlock:
jne 14f
cmpl $0, WRITERS_QUEUED(%ebx)
je 5f
- cmpl $0, FLAGS(%ebx)
+ cmpb $0, FLAGS(%ebx)
je 5f
3: addl $1, READERS_QUEUED(%ebx)
@@ -77,8 +73,18 @@ __pthread_rwlock_rdlock:
#endif
jne 10f
-11: addl $READERS_WAKEUP, %ebx
- movl %esi, %ecx /* movl $FUTEX_WAIT, %ecx */
+11:
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movzbl PSHARED(%ebx), %ecx
+ xorl $FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %ecx
+#else
+ movzbl PSHARED(%ebx), %ecx
+# if FUTEX_WAIT != 0
+ orl $FUTEX_WAIT, %ecx
+# endif
+ xorl %gs:PRIVATE_FUTEX, %ecx
+#endif
+ addl $READERS_WAKEUP, %ebx
movl $SYS_futex, %eax
ENTER_KERNEL
@@ -98,7 +104,7 @@ __pthread_rwlock_rdlock:
13: subl $1, READERS_QUEUED(%ebx)
jmp 2b
-5: xorl %ecx, %ecx
+5: xorl %edx, %edx
addl $1, NR_READERS(%ebx)
je 8f
9: LOCK
@@ -110,24 +116,32 @@ __pthread_rwlock_rdlock:
jne 6f
7:
- movl %ecx, %eax
+ movl %edx, %eax
popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
ret
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%esi, -8)
+ cfi_offset(%ebx, -12)
1:
#if MUTEX == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal MUTEX(%ebx), %ecx
+ leal MUTEX(%ebx), %edx
#endif
- call __lll_mutex_lock_wait
+ movzbl PSHARED(%ebx), %ecx
+ call __lll_lock_wait
jmp 2b
14: cmpl %gs:TID, %eax
jne 3b
/* Deadlock detected. */
- movl $EDEADLK, %ecx
+ movl $EDEADLK, %edx
jmp 9b
6:
@@ -136,17 +150,18 @@ __pthread_rwlock_rdlock:
#else
leal MUTEX(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+ movzbl PSHARED(%ebx), %ecx
+ call __lll_unlock_wake
jmp 7b
/* Overflow. */
8: subl $1, NR_READERS(%ebx)
- movl $EAGAIN, %ecx
+ movl $EAGAIN, %edx
jmp 9b
/* Overflow. */
4: subl $1, READERS_QUEUED(%ebx)
- movl $EAGAIN, %ecx
+ movl $EAGAIN, %edx
jmp 9b
10:
@@ -155,17 +170,20 @@ __pthread_rwlock_rdlock:
#else
leal MUTEX(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+ movzbl PSHARED(%ebx), %ecx
+ call __lll_unlock_wake
jmp 11b
12:
#if MUTEX == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal MUTEX(%ebx), %ecx
+ leal MUTEX(%ebx), %edx
#endif
- call __lll_mutex_lock_wait
+ movzbl PSHARED(%ebx), %ecx
+ call __lll_lock_wait
jmp 13b
+ cfi_endproc
.size __pthread_rwlock_rdlock,.-__pthread_rwlock_rdlock
.globl pthread_rwlock_rdlock
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S
index 3717d7ef5..1ffdf33fe 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,19 +18,10 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
-#include <tcb-offsets.h>
-
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
+#include <bits/kernel-features.h>
.text
@@ -39,11 +30,21 @@
.type pthread_rwlock_timedrdlock,@function
.align 16
pthread_rwlock_timedrdlock:
+ cfi_startproc
pushl %esi
+ cfi_adjust_cfa_offset(4)
pushl %edi
+ cfi_adjust_cfa_offset(4)
pushl %ebx
+ cfi_adjust_cfa_offset(4)
pushl %ebp
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%esi, -8)
+ cfi_offset(%edi, -12)
+ cfi_offset(%ebx, -16)
+ cfi_offset(%ebp, -20)
subl $8, %esp
+ cfi_adjust_cfa_offset(8)
movl 28(%esp), %ebp
movl 32(%esp), %edi
@@ -64,7 +65,7 @@ pthread_rwlock_timedrdlock:
jne 14f
cmpl $0, WRITERS_QUEUED(%ebp)
je 5f
- cmpl $0, FLAGS(%ebp)
+ cmpb $0, FLAGS(%ebp)
je 5f
/* Check the value of the timeout parameter. */
@@ -87,7 +88,7 @@ pthread_rwlock_timedrdlock:
/* Get current time. */
11: movl %esp, %ebx
xorl %ecx, %ecx
- movl $SYS_gettimeofday, %eax
+ movl $__NR_gettimeofday, %eax
ENTER_KERNEL
/* Compute relative timeout. */
@@ -107,13 +108,23 @@ pthread_rwlock_timedrdlock:
/* Futex call. */
movl %ecx, (%esp) /* Store relative timeout. */
movl %edx, 4(%esp)
+
movl %esi, %edx
- xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movzbl PSHARED(%ebp), %ecx
+ xorl $FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %ecx
+#else
+ movzbl PSHARED(%ebp), %ecx
+# if FUTEX_WAIT != 0
+ orl $FUTEX_WAIT, %ecx
+# endif
+ xorl %gs:PRIVATE_FUTEX, %ecx
+#endif
movl %esp, %esi
leal READERS_WAKEUP(%ebp), %ebx
movl $SYS_futex, %eax
ENTER_KERNEL
- movl %eax, %ecx
+ movl %eax, %esi
17:
/* Reget the lock. */
@@ -128,14 +139,14 @@ pthread_rwlock_timedrdlock:
jnz 12f
13: subl $1, READERS_QUEUED(%ebp)
- cmpl $-ETIMEDOUT, %ecx
+ cmpl $-ETIMEDOUT, %esi
jne 2b
-18: movl $ETIMEDOUT, %ecx
+18: movl $ETIMEDOUT, %edx
jmp 9f
-5: xorl %ecx, %ecx
+5: xorl %edx, %edx
addl $1, NR_READERS(%ebp)
je 8f
9: LOCK
@@ -146,27 +157,42 @@ pthread_rwlock_timedrdlock:
#endif
jne 6f
-7: movl %ecx, %eax
+7: movl %edx, %eax
addl $8, %esp
+ cfi_adjust_cfa_offset(-8)
popl %ebp
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebp)
popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
popl %edi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edi)
popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
ret
+ cfi_adjust_cfa_offset(24)
+ cfi_offset(%esi, -8)
+ cfi_offset(%edi, -12)
+ cfi_offset(%ebx, -16)
+ cfi_offset(%ebp, -20)
1:
#if MUTEX == 0
- movl %ebp, %ecx
+ movl %ebp, %edx
#else
- leal MUTEX(%ebp), %ecx
+ leal MUTEX(%ebp), %edx
#endif
- call __lll_mutex_lock_wait
+ movzbl PSHARED(%ebp), %ecx
+ call __lll_lock_wait
jmp 2b
14: cmpl %gs:TID, %eax
jne 3b
- movl $EDEADLK, %ecx
+ movl $EDEADLK, %edx
jmp 9b
6:
@@ -175,17 +201,18 @@ pthread_rwlock_timedrdlock:
#else
leal MUTEX(%ebp), %eax
#endif
- call __lll_mutex_unlock_wake
+ movzbl PSHARED(%ebp), %ecx
+ call __lll_unlock_wake
jmp 7b
/* Overflow. */
8: subl $1, NR_READERS(%ebp)
- movl $EAGAIN, %ecx
+ movl $EAGAIN, %edx
jmp 9b
/* Overflow. */
4: subl $1, READERS_QUEUED(%ebp)
- movl $EAGAIN, %ecx
+ movl $EAGAIN, %edx
jmp 9b
10:
@@ -194,21 +221,24 @@ pthread_rwlock_timedrdlock:
#else
leal MUTEX(%ebp), %eax
#endif
- call __lll_mutex_unlock_wake
+ movzbl PSHARED(%ebp), %ecx
+ call __lll_unlock_wake
jmp 11b
12:
#if MUTEX == 0
- movl %ebp, %ecx
+ movl %ebp, %edx
#else
- leal MUTEX(%ebp), %ecx
+ leal MUTEX(%ebp), %edx
#endif
- call __lll_mutex_lock_wait
+ movzbl PSHARED(%ebp), %ecx
+ call __lll_lock_wait
jmp 13b
-16: movl $-ETIMEDOUT, %ecx
+16: movl $-ETIMEDOUT, %esi
jmp 17b
-19: movl $EINVAL, %ecx
+19: movl $EINVAL, %edx
jmp 9b
+ cfi_endproc
.size pthread_rwlock_timedrdlock,.-pthread_rwlock_timedrdlock
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S
index 09c9e30ca..5826f02e6 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,19 +18,10 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
-#include <tcb-offsets.h>
-
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
+#include <bits/kernel-features.h>
.text
@@ -39,11 +30,21 @@
.type pthread_rwlock_timedwrlock,@function
.align 16
pthread_rwlock_timedwrlock:
+ cfi_startproc
pushl %esi
+ cfi_adjust_cfa_offset(4)
pushl %edi
+ cfi_adjust_cfa_offset(4)
pushl %ebx
+ cfi_adjust_cfa_offset(4)
pushl %ebp
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%esi, -8)
+ cfi_offset(%edi, -12)
+ cfi_offset(%ebx, -16)
+ cfi_offset(%ebp, -20)
subl $8, %esp
+ cfi_adjust_cfa_offset(8)
movl 28(%esp), %ebp
movl 32(%esp), %edi
@@ -85,7 +86,7 @@ pthread_rwlock_timedwrlock:
/* Get current time. */
11: movl %esp, %ebx
xorl %ecx, %ecx
- movl $SYS_gettimeofday, %eax
+ movl $__NR_gettimeofday, %eax
ENTER_KERNEL
/* Compute relative timeout. */
@@ -105,13 +106,23 @@ pthread_rwlock_timedwrlock:
/* Futex call. */
movl %ecx, (%esp) /* Store relative timeout. */
movl %edx, 4(%esp)
+
movl %esi, %edx
- xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movzbl PSHARED(%ebp), %ecx
+ xorl $FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %ecx
+#else
+ movzbl PSHARED(%ebp), %ecx
+# if FUTEX_WAIT != 0
+ orl $FUTEX_WAIT, %ecx
+# endif
+ xorl %gs:PRIVATE_FUTEX, %ecx
+#endif
movl %esp, %esi
leal WRITERS_WAKEUP(%ebp), %ebx
movl $SYS_futex, %eax
ENTER_KERNEL
- movl %eax, %ecx
+ movl %eax, %esi
17:
/* Reget the lock. */
@@ -126,14 +137,14 @@ pthread_rwlock_timedwrlock:
jnz 12f
13: subl $1, WRITERS_QUEUED(%ebp)
- cmpl $-ETIMEDOUT, %ecx
+ cmpl $-ETIMEDOUT, %esi
jne 2b
-18: movl $ETIMEDOUT, %ecx
+18: movl $ETIMEDOUT, %edx
jmp 9f
-5: xorl %ecx, %ecx
+5: xorl %edx, %edx
movl %gs:TID, %eax
movl %eax, WRITER(%ebp)
9: LOCK
@@ -144,27 +155,42 @@ pthread_rwlock_timedwrlock:
#endif
jne 6f
-7: movl %ecx, %eax
+7: movl %edx, %eax
addl $8, %esp
+ cfi_adjust_cfa_offset(-8)
popl %ebp
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebp)
popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
popl %edi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edi)
popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
ret
+ cfi_adjust_cfa_offset(24)
+ cfi_offset(%esi, -8)
+ cfi_offset(%edi, -12)
+ cfi_offset(%ebx, -16)
+ cfi_offset(%ebp, -20)
1:
#if MUTEX == 0
- movl %ebp, %ecx
+ movl %ebp, %edx
#else
- leal MUTEX(%ebp), %ecx
+ leal MUTEX(%ebp), %edx
#endif
- call __lll_mutex_lock_wait
+ movzbl PSHARED(%ebp), %ecx
+ call __lll_lock_wait
jmp 2b
14: cmpl %gs:TID, %eax
jne 3b
-20: movl $EDEADLK, %ecx
+20: movl $EDEADLK, %edx
jmp 9b
6:
@@ -173,12 +199,13 @@ pthread_rwlock_timedwrlock:
#else
leal MUTEX(%ebp), %eax
#endif
- call __lll_mutex_unlock_wake
+ movzbl PSHARED(%ebp), %ecx
+ call __lll_unlock_wake
jmp 7b
/* Overflow. */
4: subl $1, WRITERS_QUEUED(%ebp)
- movl $EAGAIN, %ecx
+ movl $EAGAIN, %edx
jmp 9b
10:
@@ -187,21 +214,24 @@ pthread_rwlock_timedwrlock:
#else
leal MUTEX(%ebp), %eax
#endif
- call __lll_mutex_unlock_wake
+ movzbl PSHARED(%ebp), %ecx
+ call __lll_unlock_wake
jmp 11b
12:
#if MUTEX == 0
- movl %ebp, %ecx
+ movl %ebp, %edx
#else
- leal MUTEX(%ebp), %ecx
+ leal MUTEX(%ebp), %edx
#endif
- call __lll_mutex_lock_wait
+ movzbl PSHARED(%ebp), %ecx
+ call __lll_lock_wait
jmp 13b
-16: movl $-ETIMEDOUT, %ecx
+16: movl $-ETIMEDOUT, %esi
jmp 17b
-19: movl $EINVAL, %ecx
+19: movl $EINVAL, %edx
jmp 9b
+ cfi_endproc
.size pthread_rwlock_timedwrlock,.-pthread_rwlock_timedwrlock
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S
index 597c82fa8..0130261c7 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,17 +18,9 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
-
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
+#include <bits/kernel-features.h>
.text
@@ -37,8 +29,13 @@
.type __pthread_rwlock_unlock,@function
.align 16
__pthread_rwlock_unlock:
+ cfi_startproc
pushl %ebx
+ cfi_adjust_cfa_offset(4)
pushl %edi
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%ebx, -8)
+ cfi_offset(%edi, -12)
movl 12(%esp), %edi
@@ -60,9 +57,8 @@ __pthread_rwlock_unlock:
5: movl $0, WRITER(%edi)
- movl $1, %ecx
+ movl $1, %edx
leal WRITERS_WAKEUP(%edi), %ebx
- movl %ecx, %edx
cmpl $0, WRITERS_QUEUED(%edi)
jne 0f
@@ -82,14 +78,30 @@ __pthread_rwlock_unlock:
#endif
jne 7f
-8: movl $SYS_futex, %eax
+8:
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movzbl PSHARED(%edi), %ecx
+ xorl $FUTEX_PRIVATE_FLAG|FUTEX_WAKE, %ecx
+#else
+ movzbl PSHARED(%edi), %ecx
+ orl $FUTEX_WAKE, %ecx
+ xorl %gs:PRIVATE_FUTEX, %ecx
+#endif
+ movl $SYS_futex, %eax
ENTER_KERNEL
xorl %eax, %eax
popl %edi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edi)
popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
ret
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%ebx, -8)
+ cfi_offset(%edi, -12)
.align 16
6: LOCK
#if MUTEX == 0
@@ -106,31 +118,34 @@ __pthread_rwlock_unlock:
1:
#if MUTEX == 0
- movl %edi, %ecx
+ movl %edi, %edx
#else
- leal MUTEX(%edx), %ecx
+ leal MUTEX(%edi), %edx
#endif
- call __lll_mutex_lock_wait
+ movzbl PSHARED(%edi), %ecx
+ call __lll_lock_wait
jmp 2b
3:
#if MUTEX == 0
movl %edi, %eax
#else
- leal MUTEX(%edx), %eax
+ leal MUTEX(%edi), %eax
#endif
- call __lll_mutex_unlock_wake
+ movzbl PSHARED(%edi), %ecx
+ call __lll_unlock_wake
jmp 4b
7:
#if MUTEX == 0
movl %edi, %eax
#else
- leal MUTEX(%edx), %eax
+ leal MUTEX(%edi), %eax
#endif
- call __lll_mutex_unlock_wake
+ movzbl PSHARED(%edi), %ecx
+ call __lll_unlock_wake
jmp 8b
-
+ cfi_endproc
.size __pthread_rwlock_unlock,.-__pthread_rwlock_unlock
.globl pthread_rwlock_unlock
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S
index bb384a267..f69c49b15 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,18 +18,10 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
-#include <tcb-offsets.h>
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
+#include <bits/kernel-features.h>
.text
@@ -38,8 +30,13 @@
.type __pthread_rwlock_wrlock,@function
.align 16
__pthread_rwlock_wrlock:
+ cfi_startproc
pushl %esi
+ cfi_adjust_cfa_offset(4)
pushl %ebx
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%esi, -8)
+ cfi_offset(%ebx, -12)
xorl %esi, %esi
movl 12(%esp), %ebx
@@ -74,8 +71,18 @@ __pthread_rwlock_wrlock:
#endif
jne 10f
-11: addl $WRITERS_WAKEUP, %ebx
- movl %esi, %ecx /* movl $FUTEX_WAIT, %ecx */
+11:
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movzbl PSHARED(%ebx), %ecx
+ xorl $FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %ecx
+#else
+ movzbl PSHARED(%ebx), %ecx
+# if FUTEX_WAIT != 0
+ orl $FUTEX_WAIT, %ecx
+# endif
+ xorl %gs:PRIVATE_FUTEX, %ecx
+#endif
+ addl $WRITERS_WAKEUP, %ebx
movl $SYS_futex, %eax
ENTER_KERNEL
@@ -95,7 +102,7 @@ __pthread_rwlock_wrlock:
13: subl $1, WRITERS_QUEUED(%ebx)
jmp 2b
-5: xorl %ecx, %ecx
+5: xorl %edx, %edx
movl %gs:TID, %eax
movl %eax, WRITER(%ebx)
9: LOCK
@@ -107,23 +114,31 @@ __pthread_rwlock_wrlock:
jne 6f
7:
- movl %ecx, %eax
+ movl %edx, %eax
popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
ret
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%esi, -8)
+ cfi_offset(%ebx, -12)
1:
#if MUTEX == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal MUTEX(%ebx), %ecx
+ leal MUTEX(%ebx), %edx
#endif
- call __lll_mutex_lock_wait
+ movzbl PSHARED(%ebx), %ecx
+ call __lll_lock_wait
jmp 2b
14: cmpl %gs:TID , %eax
jne 3b
- movl $EDEADLK, %ecx
+ movl $EDEADLK, %edx
jmp 9b
6:
@@ -132,11 +147,12 @@ __pthread_rwlock_wrlock:
#else
leal MUTEX(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+ movzbl PSHARED(%ebx), %ecx
+ call __lll_unlock_wake
jmp 7b
4: subl $1, WRITERS_QUEUED(%ebx)
- movl $EAGAIN, %ecx
+ movl $EAGAIN, %edx
jmp 9b
10:
@@ -145,17 +161,20 @@ __pthread_rwlock_wrlock:
#else
leal MUTEX(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+ movzbl PSHARED(%ebx), %ecx
+ call __lll_unlock_wake
jmp 11b
12:
#if MUTEX == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal MUTEX(%ebx), %ecx
+ leal MUTEX(%ebx), %edx
#endif
- call __lll_mutex_lock_wait
+ movzbl PSHARED(%ebx), %ecx
+ call __lll_lock_wait
jmp 13b
+ cfi_endproc
.size __pthread_rwlock_wrlock,.-__pthread_rwlock_wrlock
.globl pthread_rwlock_wrlock
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_post.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_post.S
index a0dc39c8f..b077a20ca 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_post.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_post.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007, 2008 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19,15 +19,8 @@
#include <sysdep.h>
#include <pthread-errnos.h>
-#include <tls.h>
-
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
-#define FUTEX_WAKE 1
+#include <structsem.h>
+#include <lowlevellock.h>
.text
@@ -36,25 +29,49 @@
.type __new_sem_post,@function
.align 16
__new_sem_post:
+ cfi_startproc
pushl %ebx
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%ebx, -8)
movl 8(%esp), %ebx
- movl $1, %edx
+
+#if VALUE == 0
+ movl (%ebx), %eax
+#else
+ movl VALUE(%ebx), %eax
+#endif
+0: cmpl $SEM_VALUE_MAX, %eax
+ je 3f
+ leal 1(%eax), %edx
LOCK
- xaddl %edx, (%ebx)
+#if VALUE == 0
+ cmpxchgl %edx, (%ebx)
+#else
+ cmpxchgl %edx, VALUE(%ebx)
+#endif
+ jnz 0b
+
+ cmpl $0, NWAITERS(%ebx)
+ je 2f
- movl $SYS_futex, %eax
movl $FUTEX_WAKE, %ecx
- addl $1, %edx
+ orl PRIVATE(%ebx), %ecx
+ movl $1, %edx
+ movl $SYS_futex, %eax
ENTER_KERNEL
testl %eax, %eax
js 1f
- xorl %eax, %eax
+2: xorl %eax, %eax
popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
ret
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%ebx, -8)
1:
#ifdef __PIC__
call __x86.get_pc_thunk.bx
@@ -80,6 +97,35 @@ __new_sem_post:
orl $-1, %eax
popl %ebx
ret
+
+3:
+#ifdef __PIC__
+ call __x86.get_pc_thunk.bx
+#else
+ movl $5f, %ebx
+5:
+#endif
+ addl $_GLOBAL_OFFSET_TABLE_, %ebx
+#if USE___THREAD
+# ifdef NO_TLS_DIRECT_SEG_REFS
+ movl errno@gotntpoff(%ebx), %edx
+ addl %gs:0, %edx
+ movl $EOVERFLOW, (%edx)
+# else
+ movl errno@gotntpoff(%ebx), %edx
+ movl $EOVERFLOW, %gs:(%edx)
+# endif
+#else
+ call __errno_location@plt
+ movl $EOVERFLOW, (%eax)
+#endif
+
+ orl $-1, %eax
+ popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
+ ret
+ cfi_endproc
.size __new_sem_post,.-__new_sem_post
weak_alias(__new_sem_post, sem_post)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S
index 972b49fac..218b12f9c 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2007, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19,15 +19,13 @@
#include <sysdep.h>
#include <pthread-errnos.h>
-#include <tcb-offsets.h>
+#include <structsem.h>
+#include <lowlevellock.h>
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-#define FUTEX_WAKE 1
+#if VALUE != 0
+# error "code needs to be rewritten for VALUE != 0"
+#endif
.text
@@ -35,55 +33,46 @@
.globl sem_timedwait
.type sem_timedwait,@function
.align 16
- cfi_startproc
sem_timedwait:
- /* First check for cancellation. */
- movl %gs:CANCELHANDLING, %eax
- andl $0xfffffff9, %eax
- cmpl $8, %eax
- je 10f
-
+.LSTARTCODE:
movl 4(%esp), %ecx
movl (%ecx), %eax
2: testl %eax, %eax
- je,pn 1f
+ je 1f
leal -1(%eax), %edx
LOCK
cmpxchgl %edx, (%ecx)
- jne,pn 2b
+ jne 2b
xorl %eax, %eax
ret
/* Check whether the timeout value is valid. */
1: pushl %esi
- cfi_adjust_cfa_offset(4)
+.Lpush_esi:
pushl %edi
- cfi_adjust_cfa_offset(4)
+.Lpush_edi:
pushl %ebx
- cfi_adjust_cfa_offset(4)
+.Lpush_ebx:
subl $12, %esp
- cfi_adjust_cfa_offset(12)
+.Lsub_esp:
movl 32(%esp), %edi
- cfi_offset(7, -12) /* %edi */
/* Check for invalid nanosecond field. */
cmpl $1000000000, 4(%edi)
movl $EINVAL, %esi
- cfi_offset(6, -8) /* %esi */
jae 6f
- cfi_offset(3, -16) /* %ebx */
-7: call __pthread_enable_asynccancel
- movl %eax, 8(%esp)
+ LOCK
+ incl NWAITERS(%ecx)
- xorl %ecx, %ecx
+7: xorl %ecx, %ecx
movl %esp, %ebx
movl %ecx, %edx
- movl $SYS_gettimeofday, %eax
+ movl $__NR_gettimeofday, %eax
ENTER_KERNEL
/* Compute relative timeout. */
@@ -103,19 +92,30 @@ sem_timedwait:
movl %ecx, (%esp) /* Store relative timeout. */
movl %edx, 4(%esp)
- movl 28(%esp), %ebx
- xorl %ecx, %ecx
+
+.LcleanupSTART:
+ call __pthread_enable_asynccancel
+ movl %eax, 8(%esp)
+
+ movl 28(%esp), %ebx /* Load semaphore address. */
+#if FUTEX_WAIT == 0
+ movl PRIVATE(%ebx), %ecx
+#else
+ movl $FUTEX_WAIT, %ecx
+ orl PRIVATE(%ebx), %ecx
+#endif
movl %esp, %esi
- movl $SYS_futex, %eax
xorl %edx, %edx
+ movl $SYS_futex, %eax
ENTER_KERNEL
movl %eax, %esi
movl 8(%esp), %eax
call __pthread_disable_asynccancel
+.LcleanupEND:
testl %esi, %esi
- je,pt 9f
+ je 9f
cmpl $-EWOULDBLOCK, %esi
jne 3f
@@ -126,29 +126,27 @@ sem_timedwait:
leal -1(%eax), %ecx
LOCK
cmpxchgl %ecx, (%ebx)
- jne,pn 8b
+ jne 8b
- addl $12, %esp
- cfi_adjust_cfa_offset(-12)
xorl %eax, %eax
+
+ LOCK
+ decl NWAITERS(%ebx)
+
+10: addl $12, %esp
+.Ladd_esp:
popl %ebx
- cfi_adjust_cfa_offset(-4)
- cfi_restore(3)
+.Lpop_ebx:
popl %edi
- cfi_adjust_cfa_offset(-4)
- cfi_restore(7)
+.Lpop_edi:
popl %esi
- cfi_adjust_cfa_offset(-4)
- cfi_restore(6)
+.Lpop_esi:
ret
- cfi_adjust_cfa_offset(24)
- cfi_offset(6, -8) /* %esi */
- cfi_offset(7, -12) /* %edi */
- cfi_offset(3, -16) /* %ebx */
+.Lafter_ret:
3: negl %esi
6:
-#ifdef __PIC__
+#ifdef PIC
call __x86.get_pc_thunk.bx
#else
movl $4f, %ebx
@@ -169,25 +167,163 @@ sem_timedwait:
movl %esi, (%eax)
#endif
- addl $12, %esp
- cfi_adjust_cfa_offset(-12)
+ movl 28(%esp), %ebx /* Load semaphore address. */
orl $-1, %eax
- popl %ebx
- cfi_adjust_cfa_offset(-4)
- cfi_restore(3)
- popl %edi
- cfi_adjust_cfa_offset(-4)
- cfi_restore(7)
- popl %esi
- cfi_adjust_cfa_offset(-4)
- cfi_restore(6)
- ret
+ jmp 10b
+ .size sem_timedwait,.-sem_timedwait
+
-10: /* Canceled. */
- movl $0xffffffff, %gs:RESULT
+ .type sem_wait_cleanup,@function
+sem_wait_cleanup:
LOCK
- orl $0x10, %gs:CANCELHANDLING
- movl %gs:CLEANUP_JMP_BUF, %eax
- jmp HIDDEN_JUMPTARGET (__pthread_unwind)
- cfi_endproc
- .size sem_timedwait,.-sem_timedwait
+ decl NWAITERS(%ebx)
+ movl %eax, (%esp)
+.LcallUR:
+ call _Unwind_Resume@PLT
+ hlt
+.LENDCODE:
+ .size sem_wait_cleanup,.-sem_wait_cleanup
+
+
+ .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+ .byte 0xff # @LPStart format (omit)
+ .byte 0xff # @TType format (omit)
+ .byte 0x01 # call-site format
+ # DW_EH_PE_uleb128
+ .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+ .uleb128 .LcleanupSTART-.LSTARTCODE
+ .uleb128 .LcleanupEND-.LcleanupSTART
+ .uleb128 sem_wait_cleanup-.LSTARTCODE
+ .uleb128 0
+ .uleb128 .LcallUR-.LSTARTCODE
+ .uleb128 .LENDCODE-.LcallUR
+ .uleb128 0
+ .uleb128 0
+.Lcstend:
+
+
+ .section .eh_frame,"a",@progbits
+.LSTARTFRAME:
+ .long .LENDCIE-.LSTARTCIE # Length of the CIE.
+.LSTARTCIE:
+ .long 0 # CIE ID.
+ .byte 1 # Version number.
+#ifdef SHARED
+ .string "zPLR" # NUL-terminated augmentation
+ # string.
+#else
+ .string "zPL" # NUL-terminated augmentation
+ # string.
+#endif
+ .uleb128 1 # Code alignment factor.
+ .sleb128 -4 # Data alignment factor.
+ .byte 8 # Return address register
+ # column.
+#ifdef SHARED
+ .uleb128 7 # Augmentation value length.
+ .byte 0x9b # Personality: DW_EH_PE_pcrel
+ # + DW_EH_PE_sdata4
+ # + DW_EH_PE_indirect
+ .long DW.ref.__gcc_personality_v0-.
+ .byte 0x1b # LSDA Encoding: DW_EH_PE_pcrel
+ # + DW_EH_PE_sdata4.
+ .byte 0x1b # FDE Encoding: DW_EH_PE_pcrel
+ # + DW_EH_PE_sdata4.
+#else
+ .uleb128 6 # Augmentation value length.
+ .byte 0x0 # Personality: absolute
+ .long __gcc_personality_v0
+ .byte 0x0 # LSDA Encoding: absolute
+#endif
+ .byte 0x0c # DW_CFA_def_cfa
+ .uleb128 4
+ .uleb128 4
+ .byte 0x88 # DW_CFA_offset, column 0x10
+ .uleb128 1
+ .align 4
+.LENDCIE:
+
+ .long .LENDFDE-.LSTARTFDE # Length of the FDE.
+.LSTARTFDE:
+ .long .LSTARTFDE-.LSTARTFRAME # CIE pointer.
+#ifdef SHARED
+ .long .LSTARTCODE-. # PC-relative start address
+ # of the code.
+#else
+ .long .LSTARTCODE # Start address of the code.
+#endif
+ .long .LENDCODE-.LSTARTCODE # Length of the code.
+ .uleb128 4 # Augmentation size
+#ifdef SHARED
+ .long .LexceptSTART-.
+#else
+ .long .LexceptSTART
+#endif
+
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lpush_esi-.LSTARTCODE
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 8
+ .byte 0x86 # DW_CFA_offset %esi
+ .uleb128 2
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lpush_edi-.Lpush_esi
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 12
+ .byte 0x87 # DW_CFA_offset %edi
+ .uleb128 3
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lpush_ebx-.Lpush_edi
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 16
+ .byte 0x83 # DW_CFA_offset %ebx
+ .uleb128 4
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lsub_esp-.Lpush_ebx
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 28
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Ladd_esp-.Lsub_esp
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 16
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lpop_ebx-.Ladd_esp
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 12
+ .byte 0xc3 # DW_CFA_restore %ebx
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lpop_edi-.Lpop_ebx
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 8
+ .byte 0xc7 # DW_CFA_restore %edi
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lpop_esi-.Lpop_edi
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 4
+ .byte 0xc6 # DW_CFA_restore %esi
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lafter_ret-.Lpop_esi
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 28
+ .byte 0x86 # DW_CFA_offset %esi
+ .uleb128 2
+ .byte 0x87 # DW_CFA_offset %edi
+ .uleb128 3
+ .byte 0x83 # DW_CFA_offset %ebx
+ .uleb128 4
+ .align 4
+.LENDFDE:
+
+
+#ifdef SHARED
+ .hidden DW.ref.__gcc_personality_v0
+ .weak DW.ref.__gcc_personality_v0
+ .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+ .align 4
+ .type DW.ref.__gcc_personality_v0, @object
+ .size DW.ref.__gcc_personality_v0, 4
+DW.ref.__gcc_personality_v0:
+ .long __gcc_personality_v0
+#endif
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S
index 7db64820f..dad96858f 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19,13 +19,7 @@
#include <sysdep.h>
#include <pthread-errnos.h>
-#include <tls.h>
-
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
+#include <lowlevellock.h>
.text
@@ -42,7 +36,7 @@ __new_sem_trywait:
leal -1(%eax), %edx
LOCK
cmpxchgl %edx, (%ecx)
- jne,pn 2b
+ jne 2b
xorl %eax, %eax
ret
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S
index c3e6cbce6..b1c32ee4d 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19,86 +19,98 @@
#include <sysdep.h>
#include <pthread-errnos.h>
-#include <tcb-offsets.h>
-#include <tls.h>
+#include <structsem.h>
+#include <lowlevellock.h>
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
-#define FUTEX_WAKE 1
+#if VALUE != 0
+# error "code needs to be rewritten for VALUE != 0"
+#endif
.text
.globl __new_sem_wait
.type __new_sem_wait,@function
.align 16
- cfi_startproc
__new_sem_wait:
- /* First check for cancellation. */
- movl %gs:CANCELHANDLING, %eax
- andl $0xfffffff9, %eax
- cmpl $8, %eax
- je 5f
-
+.LSTARTCODE:
pushl %ebx
- cfi_adjust_cfa_offset(4)
+.Lpush_ebx:
pushl %esi
- cfi_adjust_cfa_offset(4)
+.Lpush_esi:
subl $4, %esp
- cfi_adjust_cfa_offset(4)
+.Lsub_esp:
movl 16(%esp), %ebx
- cfi_offset(3, -8) /* %ebx */
- cfi_offset(6, -12) /* %esi */
-3: movl (%ebx), %eax
+ movl (%ebx), %eax
2: testl %eax, %eax
- je,pn 1f
+ je 1f
leal -1(%eax), %edx
LOCK
cmpxchgl %edx, (%ebx)
- jne,pn 2b
- xorl %eax, %eax
+ jne 2b
+7: xorl %eax, %eax
- movl 4(%esp), %esi
- cfi_restore(6)
+9: movl 4(%esp), %esi
movl 8(%esp), %ebx
- cfi_restore(3)
addl $12, %esp
- cfi_adjust_cfa_offset(-12)
+.Ladd_esp:
ret
- cfi_adjust_cfa_offset(8)
- cfi_offset(3, -8) /* %ebx */
- cfi_offset(6, -12) /* %esi */
-1: call __pthread_enable_asynccancel
+.Lafter_ret:
+1: LOCK
+ incl NWAITERS(%ebx)
+
+.LcleanupSTART:
+6: call __pthread_enable_asynccancel
movl %eax, (%esp)
+#if FUTEX_WAIT == 0
+ movl PRIVATE(%ebx), %ecx
+#else
+ movl $FUTEX_WAIT, %ecx
+ orl PRIVATE(%ebx), %ecx
+#endif
xorl %esi, %esi
+ xorl %edx, %edx
movl $SYS_futex, %eax
- movl %esi, %ecx
- movl %esi, %edx
ENTER_KERNEL
movl %eax, %esi
movl (%esp), %eax
call __pthread_disable_asynccancel
+.LcleanupEND:
testl %esi, %esi
- je 3b
+ je 3f
cmpl $-EWOULDBLOCK, %esi
- je 3b
+ jne 4f
+
+3:
+ movl (%ebx), %eax
+5: testl %eax, %eax
+ je 6b
+
+ leal -1(%eax), %edx
+ LOCK
+ cmpxchgl %edx, (%ebx)
+ jne 5b
+
+ LOCK
+ decl NWAITERS(%ebx)
+ jmp 7b
+
+4: LOCK
+ decl NWAITERS(%ebx)
+
negl %esi
#ifdef __PIC__
call __x86.get_pc_thunk.bx
#else
- movl $4f, %ebx
-4:
+ movl $8f, %ebx
+8:
#endif
addl $_GLOBAL_OFFSET_TABLE_, %ebx
#if USE___THREAD
@@ -115,20 +127,143 @@ __new_sem_wait:
movl %esi, (%eax)
#endif
orl $-1, %eax
- movl 4(%esp), %esi
- cfi_restore(6)
- movl 8(%esp), %ebx
- cfi_restore(3)
- addl $12, %esp
- cfi_adjust_cfa_offset(-12)
- ret
-5: /* Canceled. */
- movl $0xffffffff, %gs:RESULT
- LOCK
- orl $0x10, %gs:CANCELHANDLING
- movl %gs:CLEANUP_JMP_BUF, %eax
- jmp HIDDEN_JUMPTARGET (__pthread_unwind)
- cfi_endproc
+ jmp 9b
.size __new_sem_wait,.-__new_sem_wait
weak_alias(__new_sem_wait, sem_wait)
+
+
+ .type sem_wait_cleanup,@function
+sem_wait_cleanup:
+ LOCK
+ decl NWAITERS(%ebx)
+ movl %eax, (%esp)
+.LcallUR:
+ call _Unwind_Resume@PLT
+ hlt
+.LENDCODE:
+ .size sem_wait_cleanup,.-sem_wait_cleanup
+
+
+ .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+ .byte 0xff # @LPStart format (omit)
+ .byte 0xff # @TType format (omit)
+ .byte 0x01 # call-site format
+ # DW_EH_PE_uleb128
+ .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+ .uleb128 .LcleanupSTART-.LSTARTCODE
+ .uleb128 .LcleanupEND-.LcleanupSTART
+ .uleb128 sem_wait_cleanup-.LSTARTCODE
+ .uleb128 0
+ .uleb128 .LcallUR-.LSTARTCODE
+ .uleb128 .LENDCODE-.LcallUR
+ .uleb128 0
+ .uleb128 0
+.Lcstend:
+
+
+ .section .eh_frame,"a",@progbits
+.LSTARTFRAME:
+ .long .LENDCIE-.LSTARTCIE # Length of the CIE.
+.LSTARTCIE:
+ .long 0 # CIE ID.
+ .byte 1 # Version number.
+#ifdef SHARED
+ .string "zPLR" # NUL-terminated augmentation
+ # string.
+#else
+ .string "zPL" # NUL-terminated augmentation
+ # string.
+#endif
+ .uleb128 1 # Code alignment factor.
+ .sleb128 -4 # Data alignment factor.
+ .byte 8 # Return address register
+ # column.
+#ifdef SHARED
+ .uleb128 7 # Augmentation value length.
+ .byte 0x9b # Personality: DW_EH_PE_pcrel
+ # + DW_EH_PE_sdata4
+ # + DW_EH_PE_indirect
+ .long DW.ref.__gcc_personality_v0-.
+ .byte 0x1b # LSDA Encoding: DW_EH_PE_pcrel
+ # + DW_EH_PE_sdata4.
+ .byte 0x1b # FDE Encoding: DW_EH_PE_pcrel
+ # + DW_EH_PE_sdata4.
+#else
+ .uleb128 6 # Augmentation value length.
+ .byte 0x0 # Personality: absolute
+ .long __gcc_personality_v0
+ .byte 0x0 # LSDA Encoding: absolute
+#endif
+ .byte 0x0c # DW_CFA_def_cfa
+ .uleb128 4
+ .uleb128 4
+ .byte 0x88 # DW_CFA_offset, column 0x10
+ .uleb128 1
+ .align 4
+.LENDCIE:
+
+ .long .LENDFDE-.LSTARTFDE # Length of the FDE.
+.LSTARTFDE:
+ .long .LSTARTFDE-.LSTARTFRAME # CIE pointer.
+#ifdef SHARED
+ .long .LSTARTCODE-. # PC-relative start address
+ # of the code.
+#else
+ .long .LSTARTCODE # Start address of the code.
+#endif
+ .long .LENDCODE-.LSTARTCODE # Length of the code.
+ .uleb128 4 # Augmentation size
+#ifdef SHARED
+ .long .LexceptSTART-.
+#else
+ .long .LexceptSTART
+#endif
+
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lpush_ebx-.LSTARTCODE
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 8
+ .byte 0x83 # DW_CFA_offset %ebx
+ .uleb128 2
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lpush_esi-.Lpush_ebx
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 12
+ .byte 0x86 # DW_CFA_offset %esi
+ .uleb128 3
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lsub_esp-.Lpush_esi
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 16
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Ladd_esp-.Lsub_esp
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 4
+ .byte 0xc3 # DW_CFA_restore %ebx
+ .byte 0xc6 # DW_CFA_restore %esi
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lafter_ret-.Ladd_esp
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 16
+ .byte 0x83 # DW_CFA_offset %ebx
+ .uleb128 2
+ .byte 0x86 # DW_CFA_offset %esi
+ .uleb128 3
+ .align 4
+.LENDFDE:
+
+
+#ifdef SHARED
+ .hidden DW.ref.__gcc_personality_v0
+ .weak DW.ref.__gcc_personality_v0
+ .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+ .align 4
+ .type DW.ref.__gcc_personality_v0, @object
+ .size DW.ref.__gcc_personality_v0, 4
+DW.ref.__gcc_personality_v0:
+ .long __gcc_personality_v0
+#endif
+
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i586/lowlevelrobustlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i586/lowlevelrobustlock.S
new file mode 100644
index 000000000..f768e16a7
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i586/lowlevelrobustlock.S
@@ -0,0 +1,20 @@
+/* Copyright (C) 2002, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include "../i486/lowlevelrobustlock.S"
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i686/lowlevelrobustlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i686/lowlevelrobustlock.S
new file mode 100644
index 000000000..f768e16a7
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i686/lowlevelrobustlock.S
@@ -0,0 +1,20 @@
+/* Copyright (C) 2002, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include "../i486/lowlevelrobustlock.S"
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h
index 97f3b09e2..55add8b8e 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2004, 2006-2008, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -20,36 +20,87 @@
#ifndef _LOWLEVELLOCK_H
#define _LOWLEVELLOCK_H 1
-#include <time.h>
-#include <sys/param.h>
-#include <bits/pthreadtypes.h>
-#include <atomic.h>
-#include <sysdep.h>
-
-/* We have a separate internal lock implementation which is not tied
- to binary compatibility. */
-
-/* Type for lock object. */
-typedef int lll_lock_t;
-
-/* Initializers for lock. */
-#define LLL_LOCK_INITIALIZER (0)
-#define LLL_LOCK_INITIALIZER_LOCKED (1)
-
-#include <tls.h>
-
-#ifndef LOCK_INSTR
-# define LOCK_INSTR "lock;"
+#ifndef __ASSEMBLER__
+# include <time.h>
+# include <sys/param.h>
+# include <bits/pthreadtypes.h>
+# include <bits/kernel-features.h>
+# include <tcb-offsets.h>
+
+# ifndef LOCK_INSTR
+# ifdef UP
+# define LOCK_INSTR /* nothing */
+# else
+# define LOCK_INSTR "lock;"
+# endif
+# endif
+#else
+# ifndef LOCK
+# ifdef UP
+# define LOCK
+# else
+# define LOCK lock
+# endif
+# endif
#endif
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
+#define FUTEX_CMP_REQUEUE 4
+#define FUTEX_WAKE_OP 5
+#define FUTEX_LOCK_PI 6
+#define FUTEX_UNLOCK_PI 7
+#define FUTEX_TRYLOCK_PI 8
+#define FUTEX_WAIT_BITSET 9
+#define FUTEX_WAKE_BITSET 10
+#define FUTEX_WAIT_REQUEUE_PI 11
+#define FUTEX_CMP_REQUEUE_PI 12
+#define FUTEX_PRIVATE_FLAG 128
+#define FUTEX_CLOCK_REALTIME 256
+
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
+
+#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
+
+/* Values for 'private' parameter of locking macros. Yes, the
+ definition seems to be backwards. But it is not. The bit will be
+ reversed before passing to the system call. */
+#define LLL_PRIVATE 0
+#define LLL_SHARED FUTEX_PRIVATE_FLAG
+
+
+#if !defined NOT_IN_libc || defined IS_IN_rtld
+/* In libc.so or ld.so all futexes are private. */
+# ifdef __ASSUME_PRIVATE_FUTEX
+# define __lll_private_flag(fl, private) \
+ ((fl) | FUTEX_PRIVATE_FLAG)
+# else
+# define __lll_private_flag(fl, private) \
+ ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
+# endif
+#else
+# ifdef __ASSUME_PRIVATE_FUTEX
+# define __lll_private_flag(fl, private) \
+ (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
+# else
+# define __lll_private_flag(fl, private) \
+ (__builtin_constant_p (private) \
+ ? ((private) == 0 \
+ ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex)) \
+ : (fl)) \
+ : ({ unsigned int __fl = ((private) ^ FUTEX_PRIVATE_FLAG); \
+ __asm__ ("andl %%gs:%P1, %0" : "+r" (__fl) \
+ : "i" (offsetof (struct pthread, header.private_futex))); \
+ __fl | (fl); }))
+# endif
+#endif
+#ifndef __ASSEMBLER__
/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-#define LLL_MUTEX_LOCK_INITIALIZER_LOCKED (1)
-#define LLL_MUTEX_LOCK_INITIALIZER_WAITERS (2)
+#define LLL_LOCK_INITIALIZER (0)
+#define LLL_LOCK_INITIALIZER_LOCKED (1)
+#define LLL_LOCK_INITIALIZER_WAITERS (2)
#ifdef __PIC__
@@ -60,247 +111,436 @@ typedef int lll_lock_t;
# define LLL_EBX_REG "b"
#endif
-#define LLL_ENTER_KERNEL "int $0x80\n\t"
+#ifdef I386_USE_SYSENTER
+# ifdef SHARED
+# define LLL_ENTER_KERNEL "call *%%gs:%P6\n\t"
+# else
+# define LLL_ENTER_KERNEL "call *_dl_sysinfo\n\t"
+# endif
+#else
+# define LLL_ENTER_KERNEL "int $0x80\n\t"
+#endif
/* Delay in spinlock loop. */
-#define BUSY_WAIT_NOP __asm__ ("rep; nop")
-
-#define lll_futex_wait(futex, val) \
- lll_futex_timed_wait (futex, val, NULL)
-
-#define lll_futex_timed_wait(futex, val, timeout) \
+#define BUSY_WAIT_NOP __asm__ ("rep; nop")
+
+
+#define LLL_STUB_UNWIND_INFO_START \
+ ".section .eh_frame,\"a\",@progbits\n" \
+"5:\t" ".long 7f-6f # Length of Common Information Entry\n" \
+"6:\t" ".long 0x0 # CIE Identifier Tag\n\t" \
+ ".byte 0x1 # CIE Version\n\t" \
+ ".ascii \"zR\\0\" # CIE Augmentation\n\t" \
+ ".uleb128 0x1 # CIE Code Alignment Factor\n\t" \
+ ".sleb128 -4 # CIE Data Alignment Factor\n\t" \
+ ".byte 0x8 # CIE RA Column\n\t" \
+ ".uleb128 0x1 # Augmentation size\n\t" \
+ ".byte 0x1b # FDE Encoding (pcrel sdata4)\n\t" \
+ ".byte 0xc # DW_CFA_def_cfa\n\t" \
+ ".uleb128 0x4\n\t" \
+ ".uleb128 0x0\n\t" \
+ ".align 4\n" \
+"7:\t" ".long 17f-8f # FDE Length\n" \
+"8:\t" ".long 8b-5b # FDE CIE offset\n\t" \
+ ".long 1b-. # FDE initial location\n\t" \
+ ".long 4b-1b # FDE address range\n\t" \
+ ".uleb128 0x0 # Augmentation size\n\t" \
+ ".byte 0x16 # DW_CFA_val_expression\n\t" \
+ ".uleb128 0x8\n\t" \
+ ".uleb128 10f-9f\n" \
+"9:\t" ".byte 0x78 # DW_OP_breg8\n\t" \
+ ".sleb128 3b-1b\n"
+#define LLL_STUB_UNWIND_INFO_END \
+ ".byte 0x16 # DW_CFA_val_expression\n\t" \
+ ".uleb128 0x8\n\t" \
+ ".uleb128 12f-11f\n" \
+"11:\t" ".byte 0x78 # DW_OP_breg8\n\t" \
+ ".sleb128 3b-2b\n" \
+"12:\t" ".byte 0x40 + (3b-2b-1) # DW_CFA_advance_loc\n\t" \
+ ".byte 0x16 # DW_CFA_val_expression\n\t" \
+ ".uleb128 0x8\n\t" \
+ ".uleb128 16f-13f\n" \
+"13:\t" ".byte 0x78 # DW_OP_breg8\n\t" \
+ ".sleb128 15f-14f\n\t" \
+ ".byte 0x0d # DW_OP_const4s\n" \
+"14:\t" ".4byte 3b-.\n\t" \
+ ".byte 0x1c # DW_OP_minus\n\t" \
+ ".byte 0x0d # DW_OP_const4s\n" \
+"15:\t" ".4byte 18f-.\n\t" \
+ ".byte 0x22 # DW_OP_plus\n" \
+"16:\t" ".align 4\n" \
+"17:\t" ".previous\n"
+
+/* Unwind info for
+ 1: lea ..., ...
+ 2: call ...
+ 3: jmp 18f
+ 4:
+ snippet. */
+#define LLL_STUB_UNWIND_INFO_3 \
+LLL_STUB_UNWIND_INFO_START \
+"10:\t" ".byte 0x40 + (2b-1b) # DW_CFA_advance_loc\n\t" \
+LLL_STUB_UNWIND_INFO_END
+
+/* Unwind info for
+ 1: lea ..., ...
+ 0: movl ..., ...
+ 2: call ...
+ 3: jmp 18f
+ 4:
+ snippet. */
+#define LLL_STUB_UNWIND_INFO_4 \
+LLL_STUB_UNWIND_INFO_START \
+"10:\t" ".byte 0x40 + (0b-1b) # DW_CFA_advance_loc\n\t" \
+ ".byte 0x16 # DW_CFA_val_expression\n\t" \
+ ".uleb128 0x8\n\t" \
+ ".uleb128 20f-19f\n" \
+"19:\t" ".byte 0x78 # DW_OP_breg8\n\t" \
+ ".sleb128 3b-0b\n" \
+"20:\t" ".byte 0x40 + (2b-0b) # DW_CFA_advance_loc\n\t" \
+LLL_STUB_UNWIND_INFO_END
+
+
+#define lll_futex_wait(futex, val, private) \
+ lll_futex_timed_wait (futex, val, NULL, private)
+
+
+#define lll_futex_timed_wait(futex, val, timeout, private) \
({ \
- int __ret; \
- register __typeof (val) _val __asm__ ("edx") = (val); \
+ int __status; \
+ register __typeof (val) _val __asm__ ("edx") = (val); \
__asm__ __volatile (LLL_EBX_LOAD \
LLL_ENTER_KERNEL \
LLL_EBX_LOAD \
- : "=a" (__ret) \
+ : "=a" (__status) \
: "0" (SYS_futex), LLL_EBX_REG (futex), "S" (timeout), \
- "c" (FUTEX_WAIT), "d" (_val), \
- "i" (offsetof (tcbhead_t, sysinfo))); \
- __ret; })
+ "c" (__lll_private_flag (FUTEX_WAIT, private)), \
+ "d" (_val), "i" (offsetof (tcbhead_t, sysinfo)) \
+ : "memory"); \
+ __status; \
+ })
-#define lll_futex_wake(futex, nr) \
- ({ \
- int __ret; \
+#define lll_futex_wake(futex, nr, private) \
+ do { \
+ int __ignore; \
register __typeof (nr) _nr __asm__ ("edx") = (nr); \
__asm__ __volatile (LLL_EBX_LOAD \
LLL_ENTER_KERNEL \
LLL_EBX_LOAD \
- : "=a" (__ret) \
+ : "=a" (__ignore) \
: "0" (SYS_futex), LLL_EBX_REG (futex), \
- "c" (FUTEX_WAKE), "d" (_nr), \
+ "c" (__lll_private_flag (FUTEX_WAKE, private)), \
+ "d" (_nr), \
"i" (0) /* phony, to align next arg's number */, \
"i" (offsetof (tcbhead_t, sysinfo))); \
- __ret; })
-
-
-/* Does not preserve %eax and %ecx. */
-extern int __lll_mutex_lock_wait (int val, int *__futex)
- __attribute ((regparm (2))) attribute_hidden;
-/* Does not preserve %eax, %ecx, and %edx. */
-extern int __lll_mutex_timedlock_wait (int val, int *__futex,
- const struct timespec *abstime)
- __attribute ((regparm (3))) attribute_hidden;
-/* Preserves all registers but %eax. */
-extern int __lll_mutex_unlock_wake (int *__futex)
- __attribute ((regparm (1))) attribute_hidden;
+ } while (0)
-/* NB: in the lll_mutex_trylock macro we simply return the value in %eax
+/* NB: in the lll_trylock macro we simply return the value in %eax
after the cmpxchg instruction. In case the operation succeded this
value is zero. In case the operation failed, the cmpxchg instruction
has loaded the current value of the memory work which is guaranteed
to be nonzero. */
-#define lll_mutex_trylock(futex) \
+#if defined NOT_IN_libc || defined UP
+# define __lll_trylock_asm LOCK_INSTR "cmpxchgl %2, %1"
+#else
+# define __lll_trylock_asm "cmpl $0, %%gs:%P5\n\t" \
+ "je 0f\n\t" \
+ "lock\n" \
+ "0:\tcmpxchgl %2, %1"
+#endif
+
+#define lll_trylock(futex) \
+ ({ int ret; \
+ __asm__ __volatile (__lll_trylock_asm \
+ : "=a" (ret), "=m" (futex) \
+ : "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex), \
+ "0" (LLL_LOCK_INITIALIZER), \
+ "i" (MULTIPLE_THREADS_OFFSET) \
+ : "memory"); \
+ ret; })
+
+#define lll_robust_trylock(futex, id) \
({ int ret; \
__asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
: "=a" (ret), "=m" (futex) \
- : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
- "0" (LLL_MUTEX_LOCK_INITIALIZER) \
+ : "r" (id), "m" (futex), \
+ "0" (LLL_LOCK_INITIALIZER) \
: "memory"); \
ret; })
-#define lll_mutex_cond_trylock(futex) \
+#define lll_cond_trylock(futex) \
({ int ret; \
__asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
: "=a" (ret), "=m" (futex) \
- : "r" (LLL_MUTEX_LOCK_INITIALIZER_WAITERS), \
- "m" (futex), "0" (LLL_MUTEX_LOCK_INITIALIZER) \
+ : "r" (LLL_LOCK_INITIALIZER_WAITERS), \
+ "m" (futex), "0" (LLL_LOCK_INITIALIZER) \
: "memory"); \
ret; })
+#if defined NOT_IN_libc || defined UP
+# define __lll_lock_asm_start LOCK_INSTR "cmpxchgl %1, %2\n\t"
+#else
+# define __lll_lock_asm_start "cmpl $0, %%gs:%P6\n\t" \
+ "je 0f\n\t" \
+ "lock\n" \
+ "0:\tcmpxchgl %1, %2\n\t"
+#endif
-#define lll_mutex_lock(futex) \
- (void) ({ int ignore1, ignore2; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
- "jnz _L_mutex_lock_%=\n\t" \
- ".subsection 1\n\t" \
- ".type _L_mutex_lock_%=,@function\n" \
- "_L_mutex_lock_%=:\n\t" \
- "leal %2, %%ecx\n\t" \
- "call __lll_mutex_lock_wait\n\t" \
- "jmp 1f\n\t" \
- ".size _L_mutex_lock_%=,.-_L_mutex_lock_%=\n" \
- ".previous\n" \
- "1:" \
- : "=a" (ignore1), "=c" (ignore2), "=m" (futex) \
- : "0" (0), "1" (1), "m" (futex) \
- : "memory"); })
-
-
-/* Special version of lll_mutex_lock which causes the unlock function to
- always wakeup waiters. */
-#define lll_mutex_cond_lock(futex) \
- (void) ({ int ignore1, ignore2; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
- "jnz _L_mutex_cond_lock_%=\n\t" \
- ".subsection 1\n\t" \
- ".type _L_mutex_cond_lock_%=,@function\n" \
- "_L_mutex_cond_lock_%=:\n\t" \
- "leal %2, %%ecx\n\t" \
- "call __lll_mutex_lock_wait\n\t" \
- "jmp 1f\n\t" \
- ".size _L_mutex_cond_lock_%=,.-_L_mutex_cond_lock_%=\n" \
- ".previous\n" \
- "1:" \
- : "=a" (ignore1), "=c" (ignore2), "=m" (futex) \
- : "0" (0), "1" (2), "m" (futex) \
- : "memory"); })
-
-
-#define lll_mutex_timedlock(futex, timeout) \
- ({ int _result, ignore1, ignore2; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
- "jnz _L_mutex_timedlock_%=\n\t" \
+#define lll_lock(futex, private) \
+ (void) \
+ ({ int ignore1, ignore2; \
+ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
+ __asm__ __volatile (__lll_lock_asm_start \
+ "jnz _L_lock_%=\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_lock_%=,@function\n" \
+ "_L_lock_%=:\n" \
+ "1:\tleal %2, %%ecx\n" \
+ "2:\tcall __lll_lock_wait_private\n" \
+ "3:\tjmp 18f\n" \
+ "4:\t.size _L_lock_%=, 4b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_3 \
+ "18:" \
+ : "=a" (ignore1), "=c" (ignore2), "=m" (futex) \
+ : "0" (0), "1" (1), "m" (futex), \
+ "i" (MULTIPLE_THREADS_OFFSET) \
+ : "memory"); \
+ else \
+ { \
+ int ignore3; \
+ __asm__ __volatile (__lll_lock_asm_start \
+ "jnz _L_lock_%=\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_lock_%=,@function\n" \
+ "_L_lock_%=:\n" \
+ "1:\tleal %2, %%edx\n" \
+ "0:\tmovl %8, %%ecx\n" \
+ "2:\tcall __lll_lock_wait\n" \
+ "3:\tjmp 18f\n" \
+ "4:\t.size _L_lock_%=, 4b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_4 \
+ "18:" \
+ : "=a" (ignore1), "=c" (ignore2), \
+ "=m" (futex), "=&d" (ignore3) \
+ : "1" (1), "m" (futex), \
+ "i" (MULTIPLE_THREADS_OFFSET), "0" (0), \
+ "g" ((int) (private)) \
+ : "memory"); \
+ } \
+ })
+
+#define lll_robust_lock(futex, id, private) \
+ ({ int __result, ignore1, ignore2; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
+ "jnz _L_robust_lock_%=\n\t" \
".subsection 1\n\t" \
- ".type _L_mutex_timedlock_%=,@function\n" \
- "_L_mutex_timedlock_%=:\n\t" \
- "leal %3, %%ecx\n\t" \
- "movl %7, %%edx\n\t" \
- "call __lll_mutex_timedlock_wait\n\t" \
- "jmp 1f\n\t" \
- ".size _L_mutex_timedlock_%=,.-_L_mutex_timedlock_%=\n"\
+ ".type _L_robust_lock_%=,@function\n" \
+ "_L_robust_lock_%=:\n" \
+ "1:\tleal %2, %%edx\n" \
+ "0:\tmovl %7, %%ecx\n" \
+ "2:\tcall __lll_robust_lock_wait\n" \
+ "3:\tjmp 18f\n" \
+ "4:\t.size _L_robust_lock_%=, 4b-1b\n\t" \
".previous\n" \
- "1:" \
- : "=a" (_result), "=c" (ignore1), "=&d" (ignore2), \
- "=m" (futex) \
- : "0" (0), "1" (1), "m" (futex), "m" (timeout) \
+ LLL_STUB_UNWIND_INFO_4 \
+ "18:" \
+ : "=a" (__result), "=c" (ignore1), "=m" (futex), \
+ "=&d" (ignore2) \
+ : "0" (0), "1" (id), "m" (futex), "g" ((int) (private))\
: "memory"); \
- _result; })
-
-
-#define lll_mutex_unlock(futex) \
- (void) ({ int ignore; \
- __asm__ __volatile (LOCK_INSTR "subl $1,%0\n\t" \
- "jne _L_mutex_unlock_%=\n\t" \
- ".subsection 1\n\t" \
- ".type _L_mutex_unlock_%=,@function\n" \
- "_L_mutex_unlock_%=:\n\t" \
- "leal %0, %%eax\n\t" \
- "call __lll_mutex_unlock_wake\n\t" \
- "jmp 1f\n\t" \
- ".size _L_mutex_unlock_%=,.-_L_mutex_unlock_%=\n" \
- ".previous\n" \
- "1:" \
- : "=m" (futex), "=&a" (ignore) \
- : "m" (futex) \
- : "memory"); })
-
-
-#define lll_mutex_islocked(futex) \
- (futex != 0)
-
-
-extern int __lll_lock_wait (int val, int *__futex)
- __attribute ((regparm (2))) attribute_hidden;
-extern int __lll_unlock_wake (int *__futex)
- __attribute ((regparm (1))) attribute_hidden;
-extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
-
+ __result; })
-/* The states of a lock are:
- 0 - untaken
- 1 - taken by one user
- 2 - taken by more users */
+/* Special version of lll_lock which causes the unlock function to
+ always wakeup waiters. */
+#define lll_cond_lock(futex, private) \
+ (void) \
+ ({ int ignore1, ignore2, ignore3; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
+ "jnz _L_cond_lock_%=\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_cond_lock_%=,@function\n" \
+ "_L_cond_lock_%=:\n" \
+ "1:\tleal %2, %%edx\n" \
+ "0:\tmovl %7, %%ecx\n" \
+ "2:\tcall __lll_lock_wait\n" \
+ "3:\tjmp 18f\n" \
+ "4:\t.size _L_cond_lock_%=, 4b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_4 \
+ "18:" \
+ : "=a" (ignore1), "=c" (ignore2), "=m" (futex), \
+ "=&d" (ignore3) \
+ : "0" (0), "1" (2), "m" (futex), "g" ((int) (private))\
+ : "memory"); \
+ })
+
+
+#define lll_robust_cond_lock(futex, id, private) \
+ ({ int __result, ignore1, ignore2; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
+ "jnz _L_robust_cond_lock_%=\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_robust_cond_lock_%=,@function\n" \
+ "_L_robust_cond_lock_%=:\n" \
+ "1:\tleal %2, %%edx\n" \
+ "0:\tmovl %7, %%ecx\n" \
+ "2:\tcall __lll_robust_lock_wait\n" \
+ "3:\tjmp 18f\n" \
+ "4:\t.size _L_robust_cond_lock_%=, 4b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_4 \
+ "18:" \
+ : "=a" (__result), "=c" (ignore1), "=m" (futex), \
+ "=&d" (ignore2) \
+ : "0" (0), "1" (id | FUTEX_WAITERS), "m" (futex), \
+ "g" ((int) (private)) \
+ : "memory"); \
+ __result; })
-#if defined NOT_IN_libc
-# define lll_trylock(futex) lll_mutex_trylock (futex)
-# define lll_lock(futex) lll_mutex_lock (futex)
-# define lll_unlock(futex) lll_mutex_unlock (futex)
-#else
-/* Special versions of the macros for use in libc itself. They avoid
- the lock prefix when the thread library is not used. */
+#define lll_timedlock(futex, timeout, private) \
+ ({ int __result, ignore1, ignore2, ignore3; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
+ "jnz _L_timedlock_%=\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_timedlock_%=,@function\n" \
+ "_L_timedlock_%=:\n" \
+ "1:\tleal %3, %%ecx\n" \
+ "0:\tmovl %8, %%edx\n" \
+ "2:\tcall __lll_timedlock_wait\n" \
+ "3:\tjmp 18f\n" \
+ "4:\t.size _L_timedlock_%=, 4b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_4 \
+ "18:" \
+ : "=a" (__result), "=c" (ignore1), "=&d" (ignore2), \
+ "=m" (futex), "=S" (ignore3) \
+ : "0" (0), "1" (1), "m" (futex), "m" (timeout), \
+ "4" ((int) (private)) \
+ : "memory"); \
+ __result; })
-# define lll_trylock(futex) \
- ({ unsigned char ret; \
- __asm__ __volatile ("cmpl $0, %%gs:%P5\n\t" \
- "je,pt 0f\n\t" \
- "lock\n" \
- "0:\tcmpxchgl %2, %1; setne %0" \
- : "=a" (ret), "=m" (futex) \
- : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
- "0" (LLL_MUTEX_LOCK_INITIALIZER), \
- "i" (offsetof (tcbhead_t, multiple_threads)) \
+#define lll_robust_timedlock(futex, timeout, id, private) \
+ ({ int __result, ignore1, ignore2, ignore3; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
+ "jnz _L_robust_timedlock_%=\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_robust_timedlock_%=,@function\n" \
+ "_L_robust_timedlock_%=:\n" \
+ "1:\tleal %3, %%ecx\n" \
+ "0:\tmovl %8, %%edx\n" \
+ "2:\tcall __lll_robust_timedlock_wait\n" \
+ "3:\tjmp 18f\n" \
+ "4:\t.size _L_robust_timedlock_%=, 4b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_4 \
+ "18:" \
+ : "=a" (__result), "=c" (ignore1), "=&d" (ignore2), \
+ "=m" (futex), "=S" (ignore3) \
+ : "0" (0), "1" (id), "m" (futex), "m" (timeout), \
+ "4" ((int) (private)) \
: "memory"); \
- ret; })
+ __result; })
-
-# define lll_lock(futex) \
- (void) ({ int ignore1, ignore2; \
- __asm__ __volatile ("cmpl $0, %%gs:%P6\n\t" \
- "je,pt 0f\n\t" \
- "lock\n" \
- "0:\tcmpxchgl %1, %2\n\t" \
- "jnz _L_mutex_lock_%=\n\t" \
- ".subsection 1\n\t" \
- ".type _L_mutex_lock_%=,@function\n" \
- "_L_mutex_lock_%=:\n\t" \
- "leal %2, %%ecx\n\t" \
- "call __lll_mutex_lock_wait\n\t" \
- "jmp 1f\n\t" \
- ".size _L_mutex_lock_%=,.-_L_mutex_lock_%=\n" \
- ".previous\n" \
- "1:" \
- : "=a" (ignore1), "=c" (ignore2), "=m" (futex) \
- : "0" (0), "1" (1), "m" (futex), \
- "i" (offsetof (tcbhead_t, multiple_threads)) \
- : "memory"); })
-
-
-# define lll_unlock(futex) \
- (void) ({ int ignore; \
- __asm__ __volatile ("cmpl $0, %%gs:%P3\n\t" \
- "je,pt 0f\n\t" \
- "lock\n" \
- "0:\tsubl $1,%0\n\t" \
- "jne _L_mutex_unlock_%=\n\t" \
- ".subsection 1\n\t" \
- ".type _L_mutex_unlock_%=,@function\n" \
- "_L_mutex_unlock_%=:\n\t" \
- "leal %0, %%eax\n\t" \
- "call __lll_mutex_unlock_wake\n\t" \
- "jmp 1f\n\t" \
- ".size _L_mutex_unlock_%=,.-_L_mutex_unlock_%=\n" \
- ".previous\n" \
- "1:" \
- : "=m" (futex), "=&a" (ignore) \
- : "m" (futex), \
- "i" (offsetof (tcbhead_t, multiple_threads)) \
- : "memory"); })
+#if defined NOT_IN_libc || defined UP
+# define __lll_unlock_asm LOCK_INSTR "subl $1, %0\n\t"
+#else
+# define __lll_unlock_asm "cmpl $0, %%gs:%P3\n\t" \
+ "je 0f\n\t" \
+ "lock\n" \
+ "0:\tsubl $1,%0\n\t"
#endif
+#define lll_unlock(futex, private) \
+ (void) \
+ ({ int ignore; \
+ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
+ __asm__ __volatile (__lll_unlock_asm \
+ "jne _L_unlock_%=\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_unlock_%=,@function\n" \
+ "_L_unlock_%=:\n" \
+ "1:\tleal %0, %%eax\n" \
+ "2:\tcall __lll_unlock_wake_private\n" \
+ "3:\tjmp 18f\n" \
+ "4:\t.size _L_unlock_%=, 4b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_3 \
+ "18:" \
+ : "=m" (futex), "=&a" (ignore) \
+ : "m" (futex), "i" (MULTIPLE_THREADS_OFFSET) \
+ : "memory"); \
+ else \
+ { \
+ int ignore2; \
+ __asm__ __volatile (__lll_unlock_asm \
+ "jne _L_unlock_%=\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_unlock_%=,@function\n" \
+ "_L_unlock_%=:\n" \
+ "1:\tleal %0, %%eax\n" \
+ "0:\tmovl %5, %%ecx\n" \
+ "2:\tcall __lll_unlock_wake\n" \
+ "3:\tjmp 18f\n" \
+ "4:\t.size _L_unlock_%=, 4b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_4 \
+ "18:" \
+ : "=m" (futex), "=&a" (ignore), "=&c" (ignore2) \
+ : "i" (MULTIPLE_THREADS_OFFSET), "m" (futex), \
+ "g" ((int) (private)) \
+ : "memory"); \
+ } \
+ })
+
+#define lll_robust_unlock(futex, private) \
+ (void) \
+ ({ int ignore, ignore2; \
+ __asm__ __volatile (LOCK_INSTR "andl %3, %0\n\t" \
+ "jne _L_robust_unlock_%=\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_robust_unlock_%=,@function\n" \
+ "_L_robust_unlock_%=:\n\t" \
+ "1:\tleal %0, %%eax\n" \
+ "0:\tmovl %5, %%ecx\n" \
+ "2:\tcall __lll_unlock_wake\n" \
+ "3:\tjmp 18f\n" \
+ "4:\t.size _L_robust_unlock_%=, 4b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_4 \
+ "18:" \
+ : "=m" (futex), "=&a" (ignore), "=&c" (ignore2) \
+ : "i" (FUTEX_WAITERS), "m" (futex), \
+ "g" ((int) (private)) \
+ : "memory"); \
+ })
+
+
+#define lll_robust_dead(futex, private) \
+ (void) \
+ ({ int __ignore; \
+ register int _nr __asm__ ("edx") = 1; \
+ __asm__ __volatile (LOCK_INSTR "orl %5, (%2)\n\t" \
+ LLL_EBX_LOAD \
+ LLL_ENTER_KERNEL \
+ LLL_EBX_LOAD \
+ : "=a" (__ignore) \
+ : "0" (SYS_futex), LLL_EBX_REG (&(futex)), \
+ "c" (__lll_private_flag (FUTEX_WAKE, private)), \
+ "d" (_nr), "i" (FUTEX_OWNER_DIED), \
+ "i" (offsetof (tcbhead_t, sysinfo))); \
+ })
#define lll_islocked(futex) \
(futex != LLL_LOCK_INITIALIZER)
-
/* The kernel notifies a process with uses CLONE_CLEARTID via futex
wakeup when the clone terminates. The memory location contains the
thread ID while the clone is running and is reset to zero
@@ -308,21 +548,22 @@ extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
The macro parameter must not have any side effect. */
#define lll_wait_tid(tid) \
- ({ \
- int __ret; \
- register __typeof (tid) _tid __asm__ ("edx") = (tid); \
+ do { \
+ int __ignore; \
+ register __typeof (tid) _tid __asm__ ("edx") = (tid); \
if (_tid != 0) \
__asm__ __volatile (LLL_EBX_LOAD \
"1:\tmovl %1, %%eax\n\t" \
LLL_ENTER_KERNEL \
"cmpl $0, (%%ebx)\n\t" \
- "jne,pn 1b\n\t" \
+ "jne 1b\n\t" \
LLL_EBX_LOAD \
- : "=&a" (__ret) \
+ : "=&a" (__ignore) \
: "i" (SYS_futex), LLL_EBX_REG (&tid), "S" (0), \
"c" (FUTEX_WAIT), "d" (_tid), \
- "i" (offsetof (tcbhead_t, sysinfo))); \
- __ret; })
+ "i" (offsetof (tcbhead_t, sysinfo)) \
+ : "memory"); \
+ } while (0)
extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
__attribute__ ((regparm (2))) attribute_hidden;
@@ -338,28 +579,6 @@ extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
} \
__result; })
-
-/* Conditional variable handling. */
-
-extern void __lll_cond_wait (pthread_cond_t *cond)
- __attribute ((regparm (1))) attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
- const struct timespec *abstime)
- __attribute ((regparm (2))) attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond)
- __attribute ((regparm (1))) attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond)
- __attribute ((regparm (1))) attribute_hidden;
-
-
-#define lll_cond_wait(cond) \
- __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
- __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
- __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
- __lll_cond_broadcast (cond)
-
+#endif /* !__ASSEMBLER__ */
#endif /* lowlevellock.h */
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/not-cancel.h b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/not-cancel.h
index 5bdba3f51..6557359b4 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/not-cancel.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/not-cancel.h
@@ -1,5 +1,5 @@
/* Uncancelable versions of cancelable interfaces. Linux/NPTL version.
- Copyright (C) 2003 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
@@ -26,20 +26,21 @@ extern int __close_nocancel (int) attribute_hidden;
extern int __read_nocancel (int, void *, size_t) attribute_hidden;
extern int __write_nocancel (int, const void *, size_t) attribute_hidden;
extern pid_t __waitpid_nocancel (pid_t, int *, int) attribute_hidden;
-
-libc_hidden_proto(__open_nocancel)
-libc_hidden_proto(__close_nocancel)
-libc_hidden_proto(__read_nocancel)
-libc_hidden_proto(__write_nocancel)
-libc_hidden_proto(__waitpid_nocancel)
-
+extern int __openat_nocancel (int fd, const char *fname, int oflag,
+ mode_t mode) attribute_hidden;
+extern int __openat64_nocancel (int fd, const char *fname, int oflag,
+ mode_t mode) attribute_hidden;
#else
-#define __open_nocancel(name, ...) open (name, __VA_ARGS__)
-#define __close_nocancel(fd) close (fd)
-#define __read_nocancel(fd, buf, len) read (fd, buf, len)
-#define __write_nocancel(fd, buf, len) write (fd, buf, len)
-#define __waitpid_nocancel(pid, stat_loc, options) \
- waitpid (pid, stat_loc, options)
+# define __open_nocancel(name, ...) __open (name, __VA_ARGS__)
+# define __close_nocancel(fd) __close (fd)
+# define __read_nocancel(fd, buf, len) __read (fd, buf, len)
+# define __write_nocancel(fd, buf, len) __write (fd, buf, len)
+# define __waitpid_nocancel(pid, stat_loc, options) \
+ __waitpid (pid, stat_loc, options)
+# define __openat_nocancel(fd, fname, oflag, mode) \
+ openat (fd, fname, oflag, mode)
+# define __openat64_nocancel(fd, fname, oflag, mode) \
+ openat64 (fd, fname, oflag, mode)
#endif
/* Uncancelable open. */
@@ -48,6 +49,16 @@ libc_hidden_proto(__waitpid_nocancel)
#define open_not_cancel_2(name, flags) \
__open_nocancel (name, flags)
+/* Uncancelable openat. */
+#define openat_not_cancel(fd, fname, oflag, mode) \
+ __openat_nocancel (fd, fname, oflag, mode)
+#define openat_not_cancel_3(fd, fname, oflag) \
+ __openat_nocancel (fd, fname, oflag, 0)
+#define openat64_not_cancel(fd, fname, oflag, mode) \
+ __openat64_nocancel (fd, fname, oflag, mode)
+#define openat64_not_cancel_3(fd, fname, oflag) \
+ __openat64_nocancel (fd, fname, oflag, 0)
+
/* Uncancelable close. */
#define close_not_cancel(fd) \
__close_nocancel (fd)
@@ -80,3 +91,15 @@ libc_hidden_proto(__waitpid_nocancel)
# define waitpid_not_cancel(pid, stat_loc, options) \
INLINE_SYSCALL (wait4, 4, pid, stat_loc, options, NULL)
#endif
+
+/* Uncancelable pause. */
+#define pause_not_cancel() \
+ __pause_nocancel ()
+
+/* Uncancelable nanosleep. */
+#define nanosleep_not_cancel(requested_time, remaining) \
+ __nanosleep_nocancel (requested_time, remaining)
+
+/* Uncancelable sigsuspend. */
+#define sigsuspend_not_cancel(set) \
+ __sigsuspend_nocancel (set)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/pt-vfork.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/pt-vfork.S
index 939538927..7ab222e1b 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/pt-vfork.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/pt-vfork.S
@@ -17,6 +17,10 @@
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
+#include <sysdep.h>
+#define _ERRNO_H 1
+#include <bits/errno.h>
+#include <bits/kernel-features.h>
#include <tcb-offsets.h>
/* Save the PID value. */
@@ -33,4 +37,32 @@
movl %edx, %gs:PID; \
1:
-#include <../../../../../../../libc/sysdeps/linux/i386/vfork.S>
+/* Clone the calling process, but without copying the whole address space.
+ The calling process is suspended until the new process exits or is
+ replaced by a call to `execve'. Return -1 for errors, 0 to the new process,
+ and the process ID of the new process to the old process. */
+
+ENTRY (__vfork)
+ /* Pop the return PC value into ECX. */
+ popl %ecx
+
+ SAVE_PID
+
+ /* Stuff the syscall number in EAX and enter into the kernel. */
+ movl $SYS_ify (vfork), %eax
+ int $0x80
+
+ RESTORE_PID
+
+ /* Jump to the return PC. Don't jump directly since this
+ disturbs the branch target cache. Instead push the return
+ address back on the stack. */
+ pushl %ecx
+
+ cmpl $-4095, %eax
+ jae SYSCALL_ERROR_LABEL /* Branch forward if it failed. */
+.Lpseudo_end:
+ ret
+PSEUDO_END (__vfork)
+
+weak_alias (__vfork, vfork)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/pthread_once.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/pthread_once.S
index 5ab2c5856..9a3b36303 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/pthread_once.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/pthread_once.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19,14 +19,9 @@
#include <unwindbuf.h>
#include <sysdep.h>
+#include <bits/kernel-features.h>
+#include <lowlevellock.h>
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-#define FUTEX_WAKE 1
.comm __fork_generation, 4, 4
@@ -89,7 +84,16 @@ __pthread_once:
jnz 3f /* Different for generation -> run initializer. */
/* Somebody else got here first. Wait. */
- movl %esi, %ecx /* movl $FUTEX_WAIT, %ecx */
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_WAIT|FUTEX_PRIVATE_FLAG, %ecx
+#else
+# if FUTEX_WAIT == 0
+ movl %gs:PRIVATE_FUTEX, %ecx
+# else
+ movl $FUTEX_WAIT, %ecx
+ orl %gs:PRIVATE_FUTEX, %ecx
+# endif
+#endif
movl $SYS_futex, %eax
ENTER_KERNEL
jmp 6b
@@ -130,7 +134,12 @@ __pthread_once:
/* Wake up all other threads. */
movl $0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_WAKE|FUTEX_PRIVATE_FLAG, %ecx
+#else
movl $FUTEX_WAKE, %ecx
+ orl %gs:PRIVATE_FUTEX, %ecx
+#endif
movl $SYS_futex, %eax
ENTER_KERNEL
@@ -151,7 +160,12 @@ __pthread_once:
movl $0, (%ebx)
movl $0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_WAKE|FUTEX_PRIVATE_FLAG, %ecx
+#else
movl $FUTEX_WAKE, %ecx
+ orl %gs:PRIVATE_FUTEX, %ecx
+#endif
movl $SYS_futex, %eax
ENTER_KERNEL
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/smp.h b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/smp.h
index 2c0cbe99a..f68a0c075 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/smp.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/smp.h
@@ -1,5 +1,5 @@
-/* Determine whether the host has multiple processors. SH version.
- Copyright (C) 2002 Free Software Foundation, Inc.
+/* Determine whether the host has multiple processors. Linux version.
+ Copyright (C) 1996, 2002, 2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,8 +17,40 @@
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
+#include <errno.h>
+#include <fcntl.h>
+#include <string.h>
+#include <sys/utsname.h>
+#include <not-cancel.h>
+
+/* Test whether the machine has more than one processor. This is not the
+ best test but good enough. More complicated tests would require `malloc'
+ which is not available at that time. */
static inline int
is_smp_system (void)
{
- return 0;
+ union
+ {
+ struct utsname uts;
+ char buf[512];
+ } u;
+ char *cp;
+
+  /* Try the version string reported by `uname' first. */
+ if (uname (&u.uts) == 0)
+ cp = u.uts.version;
+ else
+ {
+ /* This was not successful. Now try reading the /proc filesystem. */
+ int fd = open_not_cancel_2 ("/proc/sys/kernel/version", O_RDONLY);
+ if (__builtin_expect (fd, 0) == -1
+ || read_not_cancel (fd, u.buf, sizeof (u.buf)) <= 0)
+ /* This also didn't work. We give up and say it's a UP machine. */
+ u.buf[0] = '\0';
+
+ close_not_cancel_no_status (fd);
+ cp = u.buf;
+ }
+
+ return strstr (cp, "SMP") != NULL;
}
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/sysdep-cancel.h b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/sysdep-cancel.h
index f32c5bd20..cb8d6891c 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/sysdep-cancel.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/sysdep-cancel.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jakub@redhat.com>, 2002.
@@ -58,6 +58,7 @@
# define SAVE_OLDTYPE_3 SAVE_OLDTYPE_2
# define SAVE_OLDTYPE_4 SAVE_OLDTYPE_2
# define SAVE_OLDTYPE_5 SAVE_OLDTYPE_2
+# define SAVE_OLDTYPE_6 SAVE_OLDTYPE_2
# define PUSHCARGS_0 /* No arguments to push. */
# define DOCARGS_0 /* No arguments to frob. */
@@ -101,6 +102,14 @@
# define _POPCARGS_5 _POPCARGS_4; popl %edi; \
cfi_adjust_cfa_offset (-4); cfi_restore (edi);
+# define PUSHCARGS_6 _PUSHCARGS_6
+# define DOCARGS_6 _DOARGS_6 (44)
+# define POPCARGS_6 _POPCARGS_6
+# define _PUSHCARGS_6 pushl %ebp; cfi_adjust_cfa_offset (4); \
+ cfi_rel_offset (ebp, 0); _PUSHCARGS_5
+# define _POPCARGS_6 _POPCARGS_5; popl %ebp; \
+ cfi_adjust_cfa_offset (-4); cfi_restore (ebp);
+
# ifdef IS_IN_libpthread
# define CENABLE call __pthread_enable_asynccancel;
# define CDISABLE call __pthread_disable_asynccancel
@@ -122,6 +131,7 @@
# define POPSTATE_3 POPSTATE_2
# define POPSTATE_4 POPSTATE_3
# define POPSTATE_5 POPSTATE_4
+# define POPSTATE_6 POPSTATE_5
# ifndef __ASSEMBLER__
# define SINGLE_THREAD_P \
@@ -137,3 +147,9 @@
# define NO_CANCELLATION 1
#endif
+
+#ifndef __ASSEMBLER__
+# define RTLD_SINGLE_THREAD_P \
+ __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+ header.multiple_threads) == 0, 1)
+#endif
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/vfork.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/vfork.S
index dc7fb2ec4..b39099af5 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/vfork.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/vfork.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 1999, 2002, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 1999,2002,2004,2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or