author     Austin Foxley <austinf@cetoncorp.com>    2010-02-16 12:27:18 -0800
committer  Austin Foxley <austinf@cetoncorp.com>    2010-02-16 12:27:18 -0800
commit     a032a6587011cbdac8c2f7e11f15dc4e592bbb55 (patch)
tree       b8d8dfc6abf0168e098223c2134a3e4bd7640942 /libpthread/nptl/sysdeps/unix/sysv/linux/x86_64
parent     70f1d42b13a741f603472f405299e5d2938aa728 (diff)
mass sync with glibc nptl
Signed-off-by: Austin Foxley <austinf@cetoncorp.com>
Diffstat (limited to 'libpthread/nptl/sysdeps/unix/sysv/linux/x86_64')
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Versions                         7
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/semaphore.h                 3
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S                 334
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h                 718
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pt-vfork.S                       2
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S          40
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S        81
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S           93
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S       819
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S            470
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S                 152
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S         47
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S   121
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S   120
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S         43
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S         47
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S                      57
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S                327
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S                   11
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S                     187
20 files changed, 2484 insertions, 1195 deletions
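
Before the per-file hunks, here is a minimal C sketch (not part of the patch) of the futex protocol that the rewritten __lll_lock_wait_private and __lll_unlock_wake_private below implement in assembly: 0 means unlocked, 1 locked with no waiters, 2 locked with possible waiters. The futex() wrapper and the sketch_* names are hypothetical, assuming a Linux system with <linux/futex.h>.

#define _GNU_SOURCE
#include <linux/futex.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

/* Hypothetical wrapper around the raw futex system call. */
static long futex (int *uaddr, int op, int val, const struct timespec *timeout)
{
  return syscall (SYS_futex, uaddr, op, val, timeout, NULL, 0);
}

static void sketch_lock (int *futexp)
{
  /* Fast path: atomically move the lock word from 0 (free) to 1 (taken). */
  int expected = 0;
  if (__atomic_compare_exchange_n (futexp, &expected, 1, 0,
                                   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    return;

  /* Slow path, as in __lll_lock_wait_private: mark the lock contended (2)
     and sleep in the kernel until the word is no longer 2. */
  while (__atomic_exchange_n (futexp, 2, __ATOMIC_ACQUIRE) != 0)
    futex (futexp, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 2, NULL);
}

static void sketch_unlock (int *futexp)
{
  /* Release the lock; if it was contended, wake one waiter.  This folds the
     inline unlock fast path and __lll_unlock_wake_private into one call. */
  if (__atomic_exchange_n (futexp, 0, __ATOMIC_RELEASE) == 2)
    futex (futexp, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1, NULL);
}

The assembly versions additionally choose between private and process-shared futex opcodes at run time; the sketch hard-codes FUTEX_PRIVATE_FLAG, as the _private entry points do.
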
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Versions b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Versions
deleted file mode 100644
index 3b111ddb5..000000000
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Versions
+++ /dev/null
@@ -1,7 +0,0 @@
-librt {
- GLIBC_2.3.3 {
- # Changed timer_t.
- timer_create; timer_delete; timer_getoverrun; timer_gettime;
- timer_settime;
- }
-}
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/semaphore.h b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/semaphore.h
index 57edbbbfb..e973bc5bf 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/semaphore.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/semaphore.h
@@ -33,9 +33,6 @@
/* Value returned if `sem_open' failed. */
#define SEM_FAILED ((sem_t *) 0)
-/* Maximum value the semaphore can have. */
-#define SEM_VALUE_MAX (2147483647)
-
typedef union
{
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
index 1e461ad41..b0d04c75b 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2006, 2007, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19,39 +19,74 @@
#include <sysdep.h>
#include <pthread-errnos.h>
+#include <bits/kernel-features.h>
+#include <lowlevellock.h>
.text
-#ifndef LOCK
-# ifdef UP
-# define LOCK
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+ movl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+ xorl $(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME), reg
+# define LOAD_FUTEX_WAKE(reg) \
+ xorl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl %fs:PRIVATE_FUTEX, reg
+# else
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT, reg
+# endif
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+ movl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAKE, reg
+# if FUTEX_WAIT == 0
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg
# else
-# define LOCK lock
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT, reg
# endif
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, reg
+# define LOAD_FUTEX_WAKE(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAKE, reg
#endif
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
/* For the calculation see asm/vsyscall.h. */
#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
- .globl __lll_mutex_lock_wait
- .type __lll_mutex_lock_wait,@function
- .hidden __lll_mutex_lock_wait
+ .globl __lll_lock_wait_private
+ .type __lll_lock_wait_private,@function
+ .hidden __lll_lock_wait_private
.align 16
-__lll_mutex_lock_wait:
+__lll_lock_wait_private:
+ cfi_startproc
pushq %r10
+ cfi_adjust_cfa_offset(8)
pushq %rdx
-
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%r10, -16)
+ cfi_offset(%rdx, -24)
xorq %r10, %r10 /* No timeout. */
movl $2, %edx
-#if FUTEX_WAIT == 0
- xorl %esi, %esi
-#else
- movl $FUTEX_WAIT, %esi
-#endif
+ LOAD_PRIVATE_FUTEX_WAIT (%esi)
cmpl %edx, %eax /* NB: %edx == 2 */
jne 2f
@@ -66,33 +101,144 @@ __lll_mutex_lock_wait:
jnz 1b
popq %rdx
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%rdx)
popq %r10
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r10)
retq
- .size __lll_mutex_lock_wait,.-__lll_mutex_lock_wait
-
+ cfi_endproc
+ .size __lll_lock_wait_private,.-__lll_lock_wait_private
#ifdef NOT_IN_libc
- .globl __lll_mutex_timedlock_wait
- .type __lll_mutex_timedlock_wait,@function
- .hidden __lll_mutex_timedlock_wait
+ .globl __lll_lock_wait
+ .type __lll_lock_wait,@function
+ .hidden __lll_lock_wait
+ .align 16
+__lll_lock_wait:
+ cfi_startproc
+ pushq %r10
+ cfi_adjust_cfa_offset(8)
+ pushq %rdx
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%r10, -16)
+ cfi_offset(%rdx, -24)
+ xorq %r10, %r10 /* No timeout. */
+ movl $2, %edx
+ LOAD_FUTEX_WAIT (%esi)
+
+ cmpl %edx, %eax /* NB: %edx == 2 */
+ jne 2f
+
+1: movl $SYS_futex, %eax
+ syscall
+
+2: movl %edx, %eax
+ xchgl %eax, (%rdi) /* NB: lock is implied */
+
+ testl %eax, %eax
+ jnz 1b
+
+ popq %rdx
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%rdx)
+ popq %r10
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r10)
+ retq
+ cfi_endproc
+ .size __lll_lock_wait,.-__lll_lock_wait
+
+ /* %rdi: futex
+ %rsi: flags
+ %rdx: timeout
+ %eax: futex value
+ */
+ .globl __lll_timedlock_wait
+ .type __lll_timedlock_wait,@function
+ .hidden __lll_timedlock_wait
.align 16
-__lll_mutex_timedlock_wait:
+__lll_timedlock_wait:
+ cfi_startproc
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+# ifdef PIC
+ cmpl $0, __have_futex_clock_realtime(%rip)
+# else
+ cmpl $0, __have_futex_clock_realtime
+# endif
+ je .Lreltmo
+# endif
+
+ pushq %r9
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r9, 0)
+ movq %rdx, %r10
+ movl $0xffffffff, %r9d
+ LOAD_FUTEX_WAIT_ABS (%esi)
+
+ movl $2, %edx
+ cmpl %edx, %eax
+ jne 2f
+
+1: movl $SYS_futex, %eax
+ movl $2, %edx
+ syscall
+
+2: xchgl %edx, (%rdi) /* NB: lock is implied */
+
+ testl %edx, %edx
+ jz 3f
+
+ cmpl $-ETIMEDOUT, %eax
+ je 4f
+ cmpl $-EINVAL, %eax
+ jne 1b
+4: movl %eax, %edx
+ negl %edx
+
+3: movl %edx, %eax
+ popq %r9
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r9)
+ retq
+
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+.Lreltmo:
/* Check for a valid timeout value. */
cmpq $1000000000, 8(%rdx)
jae 3f
pushq %r8
+ cfi_adjust_cfa_offset(8)
pushq %r9
+ cfi_adjust_cfa_offset(8)
pushq %r12
+ cfi_adjust_cfa_offset(8)
pushq %r13
+ cfi_adjust_cfa_offset(8)
pushq %r14
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%r8, -16)
+ cfi_offset(%r9, -24)
+ cfi_offset(%r12, -32)
+ cfi_offset(%r13, -40)
+ cfi_offset(%r14, -48)
+ pushq %rsi
+ cfi_adjust_cfa_offset(8)
/* Stack frame for the timespec and timeval structs. */
- subq $16, %rsp
+ subq $24, %rsp
+ cfi_adjust_cfa_offset(24)
movq %rdi, %r12
movq %rdx, %r13
+ movl $2, %edx
+ xchgl %edx, (%r12)
+
+ testl %edx, %edx
+ je 6f
+
1:
/* Get current time. */
movq %rsp, %rdi
@@ -114,118 +260,137 @@ __lll_mutex_timedlock_wait:
addq $1000000000, %rsi
decq %rdi
4: testq %rdi, %rdi
- js 5f /* Time is already up. */
+ js 2f /* Time is already up. */
- /* Futex call. */
- movq %rdi, (%rsp) /* Store relative timeout. */
+ /* Store relative timeout. */
+ movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
- movl $1, %eax
+ /* Futex call. */
movl $2, %edx
- LOCK
- cmpxchgl %edx, (%r12)
-
- testl %eax, %eax
- je 8f
-
+ movl $1, %eax
movq %rsp, %r10
-#if FUTEX_WAIT == 0
- xorl %esi, %esi
-#else
- movl $FUTEX_WAIT, %esi
-#endif
+ movl 24(%rsp), %esi
+ LOAD_FUTEX_WAIT (%esi)
movq %r12, %rdi
movl $SYS_futex, %eax
syscall
- movq %rax, %rcx
-8: /* NB: %edx == 2 */
- xorl %eax, %eax
- LOCK
- cmpxchgl %edx, (%rdi)
- jnz 7f
+ /* NB: %edx == 2 */
+ xchgl %edx, (%r12)
+
+ testl %edx, %edx
+ je 6f
+
+ cmpl $-ETIMEDOUT, %eax
+ jne 1b
+2: movl $ETIMEDOUT, %edx
-6: addq $16, %rsp
+6: addq $32, %rsp
+ cfi_adjust_cfa_offset(-32)
popq %r14
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r14)
popq %r13
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r13)
popq %r12
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r12)
popq %r9
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r9)
popq %r8
- retq
-
- /* Check whether the time expired. */
-7: cmpq $-ETIMEDOUT, %rcx
- je 5f
-
- /* Make sure the current holder knows we are going to sleep. */
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r8)
movl %edx, %eax
- xchgl %eax, (%rdi)
- testl %eax, %eax
- jz 6b
- jmp 1b
+ retq
3: movl $EINVAL, %eax
retq
-
-5: movl $ETIMEDOUT, %eax
- jmp 6b
- .size __lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait
+# endif
+ cfi_endproc
+ .size __lll_timedlock_wait,.-__lll_timedlock_wait
#endif
-#ifdef NOT_IN_libc
- .globl lll_unlock_wake_cb
- .type lll_unlock_wake_cb,@function
- .hidden lll_unlock_wake_cb
+ .globl __lll_unlock_wake_private
+ .type __lll_unlock_wake_private,@function
+ .hidden __lll_unlock_wake_private
.align 16
-lll_unlock_wake_cb:
+__lll_unlock_wake_private:
+ cfi_startproc
pushq %rsi
+ cfi_adjust_cfa_offset(8)
pushq %rdx
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%rsi, -16)
+ cfi_offset(%rdx, -24)
- LOCK
- addl $1, (%rdi)
- jng 1f
+ movl $0, (%rdi)
+ LOAD_PRIVATE_FUTEX_WAKE (%esi)
+ movl $1, %edx /* Wake one thread. */
+ movl $SYS_futex, %eax
+ syscall
popq %rdx
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%rdx)
popq %rsi
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%rsi)
retq
- .size lll_unlock_wake_cb,.-lll_unlock_wake_cb
-#endif
-
+ cfi_endproc
+ .size __lll_unlock_wake_private,.-__lll_unlock_wake_private
- .globl __lll_mutex_unlock_wake
- .type __lll_mutex_unlock_wake,@function
- .hidden __lll_mutex_unlock_wake
+#ifdef NOT_IN_libc
+ .globl __lll_unlock_wake
+ .type __lll_unlock_wake,@function
+ .hidden __lll_unlock_wake
.align 16
-__lll_mutex_unlock_wake:
+__lll_unlock_wake:
+ cfi_startproc
pushq %rsi
+ cfi_adjust_cfa_offset(8)
pushq %rdx
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%rsi, -16)
+ cfi_offset(%rdx, -24)
movl $0, (%rdi)
- movl $FUTEX_WAKE, %esi
+ LOAD_FUTEX_WAKE (%esi)
movl $1, %edx /* Wake one thread. */
movl $SYS_futex, %eax
syscall
popq %rdx
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%rdx)
popq %rsi
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%rsi)
retq
- .size __lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake
+ cfi_endproc
+ .size __lll_unlock_wake,.-__lll_unlock_wake
-
-#ifdef NOT_IN_libc
.globl __lll_timedwait_tid
.type __lll_timedwait_tid,@function
.hidden __lll_timedwait_tid
.align 16
__lll_timedwait_tid:
+ cfi_startproc
pushq %r12
+ cfi_adjust_cfa_offset(8)
pushq %r13
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%r12, -16)
+ cfi_offset(%r13, -24)
movq %rdi, %r12
movq %rsi, %r13
subq $16, %rsp
+ cfi_adjust_cfa_offset(16)
/* Get current time. */
2: movq %rsp, %rdi
@@ -255,6 +420,8 @@ __lll_timedwait_tid:
jz 4f
movq %rsp, %r10
+ /* XXX The kernel so far uses global futex for the wakeup at
+ all times. */
#if FUTEX_WAIT == 0
xorl %esi, %esi
#else
@@ -269,14 +436,21 @@ __lll_timedwait_tid:
4: xorl %eax, %eax
8: addq $16, %rsp
+ cfi_adjust_cfa_offset(-16)
popq %r13
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r13)
popq %r12
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r12)
retq
+ cfi_adjust_cfa_offset(32)
1: cmpq $-ETIMEDOUT, %rax
jne 2b
6: movl $ETIMEDOUT, %eax
jmp 8b
+ cfi_endproc
.size __lll_timedwait_tid,.-__lll_timedwait_tid
#endif
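
The lowlevellock.h changes that follow add the FUTEX_PRIVATE_FLAG machinery used by the assembly above. As the header's comment notes, LLL_PRIVATE is 0 and LLL_SHARED is FUTEX_PRIVATE_FLAG, which looks backwards; the bit is flipped before it reaches the kernel. A minimal sketch of that computation for the __ASSUME_PRIVATE_FUTEX case (the sketch_* names are hypothetical, not part of the header):

#include <assert.h>
#include <linux/futex.h>

#define SKETCH_LLL_PRIVATE 0
#define SKETCH_LLL_SHARED  FUTEX_PRIVATE_FLAG

/* Simplified form of __lll_private_flag when private futexes can be
   assumed: OR the flag in, then XOR the caller's 'private' value. */
static int sketch_private_flag (int op, int private)
{
  return (op | FUTEX_PRIVATE_FLAG) ^ private;
}

int main (void)
{
  /* A process-private lock ends up with the private futex opcode... */
  assert (sketch_private_flag (FUTEX_WAIT, SKETCH_LLL_PRIVATE)
          == (FUTEX_WAIT | FUTEX_PRIVATE_FLAG));
  /* ...while a process-shared lock falls back to the global opcode. */
  assert (sketch_private_flag (FUTEX_WAIT, SKETCH_LLL_SHARED) == FUTEX_WAIT);
  return 0;
}
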
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
index c9f30e962..7c042fc80 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2004, 2006-2008, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -20,266 +20,541 @@
#ifndef _LOWLEVELLOCK_H
#define _LOWLEVELLOCK_H 1
-#include <time.h>
-#include <sys/param.h>
-#include <bits/pthreadtypes.h>
-#include <atomic.h>
-#include <sysdep.h>
-
-#ifndef LOCK_INSTR
-# ifdef UP
-# define LOCK_INSTR /* nothing */
-# else
-# define LOCK_INSTR "lock;"
+#ifndef __ASSEMBLER__
+# include <time.h>
+# include <sys/param.h>
+# include <bits/pthreadtypes.h>
+# include <bits/kernel-features.h>
+# include <tcb-offsets.h>
+
+# ifndef LOCK_INSTR
+# ifdef UP
+# define LOCK_INSTR /* nothing */
+# else
+# define LOCK_INSTR "lock;"
+# endif
+# endif
+#else
+# ifndef LOCK
+# ifdef UP
+# define LOCK
+# else
+# define LOCK lock
+# endif
# endif
#endif
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
+#define FUTEX_CMP_REQUEUE 4
+#define FUTEX_WAKE_OP 5
+#define FUTEX_LOCK_PI 6
+#define FUTEX_UNLOCK_PI 7
+#define FUTEX_TRYLOCK_PI 8
+#define FUTEX_WAIT_BITSET 9
+#define FUTEX_WAKE_BITSET 10
+#define FUTEX_WAIT_REQUEUE_PI 11
+#define FUTEX_CMP_REQUEUE_PI 12
+#define FUTEX_PRIVATE_FLAG 128
+#define FUTEX_CLOCK_REALTIME 256
+
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
+
+#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
+
+/* Values for 'private' parameter of locking macros. Yes, the
+ definition seems to be backwards. But it is not. The bit will be
+ reversed before passing to the system call. */
+#define LLL_PRIVATE 0
+#define LLL_SHARED FUTEX_PRIVATE_FLAG
+
+#ifndef __ASSEMBLER__
+
+#if !defined NOT_IN_libc || defined IS_IN_rtld
+/* In libc.so or ld.so all futexes are private. */
+# ifdef __ASSUME_PRIVATE_FUTEX
+# define __lll_private_flag(fl, private) \
+ ((fl) | FUTEX_PRIVATE_FLAG)
+# else
+# define __lll_private_flag(fl, private) \
+ ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
+# endif
+#else
+# ifdef __ASSUME_PRIVATE_FUTEX
+# define __lll_private_flag(fl, private) \
+ (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
+# else
+# define __lll_private_flag(fl, private) \
+ (__builtin_constant_p (private) \
+ ? ((private) == 0 \
+ ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex)) \
+ : (fl)) \
+ : ({ unsigned int __fl = ((private) ^ FUTEX_PRIVATE_FLAG); \
+ __asm__ ("andl %%fs:%P1, %0" : "+r" (__fl) \
+ : "i" (offsetof (struct pthread, header.private_futex))); \
+ __fl | (fl); }))
+# endif
+#endif
-
-/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-#define LLL_MUTEX_LOCK_INITIALIZER_LOCKED (1)
-#define LLL_MUTEX_LOCK_INITIALIZER_WAITERS (2)
+/* Initializer for lock. */
+#define LLL_LOCK_INITIALIZER (0)
+#define LLL_LOCK_INITIALIZER_LOCKED (1)
+#define LLL_LOCK_INITIALIZER_WAITERS (2)
/* Delay in spinlock loop. */
-#define BUSY_WAIT_NOP __asm__ ("rep; nop")
-
-
-#define lll_futex_wait(futex, val) \
- do { \
- int __ignore; \
+#define BUSY_WAIT_NOP __asm__ ("rep; nop")
+
+
+#define LLL_STUB_UNWIND_INFO_START \
+ ".section .eh_frame,\"a\",@progbits\n" \
+"7:\t" ".long 9f-8f # Length of Common Information Entry\n" \
+"8:\t" ".long 0x0 # CIE Identifier Tag\n\t" \
+ ".byte 0x1 # CIE Version\n\t" \
+ ".ascii \"zR\\0\" # CIE Augmentation\n\t" \
+ ".uleb128 0x1 # CIE Code Alignment Factor\n\t" \
+ ".sleb128 -8 # CIE Data Alignment Factor\n\t" \
+ ".byte 0x10 # CIE RA Column\n\t" \
+ ".uleb128 0x1 # Augmentation size\n\t" \
+ ".byte 0x1b # FDE Encoding (pcrel sdata4)\n\t" \
+ ".byte 0x12 # DW_CFA_def_cfa_sf\n\t" \
+ ".uleb128 0x7\n\t" \
+ ".sleb128 16\n\t" \
+ ".align 8\n" \
+"9:\t" ".long 23f-10f # FDE Length\n" \
+"10:\t" ".long 10b-7b # FDE CIE offset\n\t" \
+ ".long 1b-. # FDE initial location\n\t" \
+ ".long 6b-1b # FDE address range\n\t" \
+ ".uleb128 0x0 # Augmentation size\n\t" \
+ ".byte 0x16 # DW_CFA_val_expression\n\t" \
+ ".uleb128 0x10\n\t" \
+ ".uleb128 12f-11f\n" \
+"11:\t" ".byte 0x80 # DW_OP_breg16\n\t" \
+ ".sleb128 4b-1b\n"
+#define LLL_STUB_UNWIND_INFO_END \
+ ".byte 0x16 # DW_CFA_val_expression\n\t" \
+ ".uleb128 0x10\n\t" \
+ ".uleb128 14f-13f\n" \
+"13:\t" ".byte 0x80 # DW_OP_breg16\n\t" \
+ ".sleb128 4b-2b\n" \
+"14:\t" ".byte 0x40 + (3b-2b) # DW_CFA_advance_loc\n\t" \
+ ".byte 0x0e # DW_CFA_def_cfa_offset\n\t" \
+ ".uleb128 0\n\t" \
+ ".byte 0x16 # DW_CFA_val_expression\n\t" \
+ ".uleb128 0x10\n\t" \
+ ".uleb128 16f-15f\n" \
+"15:\t" ".byte 0x80 # DW_OP_breg16\n\t" \
+ ".sleb128 4b-3b\n" \
+"16:\t" ".byte 0x40 + (4b-3b-1) # DW_CFA_advance_loc\n\t" \
+ ".byte 0x0e # DW_CFA_def_cfa_offset\n\t" \
+ ".uleb128 128\n\t" \
+ ".byte 0x16 # DW_CFA_val_expression\n\t" \
+ ".uleb128 0x10\n\t" \
+ ".uleb128 20f-17f\n" \
+"17:\t" ".byte 0x80 # DW_OP_breg16\n\t" \
+ ".sleb128 19f-18f\n\t" \
+ ".byte 0x0d # DW_OP_const4s\n" \
+"18:\t" ".4byte 4b-.\n\t" \
+ ".byte 0x1c # DW_OP_minus\n\t" \
+ ".byte 0x0d # DW_OP_const4s\n" \
+"19:\t" ".4byte 24f-.\n\t" \
+ ".byte 0x22 # DW_OP_plus\n" \
+"20:\t" ".byte 0x40 + (5b-4b+1) # DW_CFA_advance_loc\n\t" \
+ ".byte 0x13 # DW_CFA_def_cfa_offset_sf\n\t" \
+ ".sleb128 16\n\t" \
+ ".byte 0x16 # DW_CFA_val_expression\n\t" \
+ ".uleb128 0x10\n\t" \
+ ".uleb128 22f-21f\n" \
+"21:\t" ".byte 0x80 # DW_OP_breg16\n\t" \
+ ".sleb128 4b-5b\n" \
+"22:\t" ".align 8\n" \
+"23:\t" ".previous\n"
+
+/* Unwind info for
+ 1: leaq ..., %rdi
+ 2: subq $128, %rsp
+ 3: callq ...
+ 4: addq $128, %rsp
+ 5: jmp 24f
+ 6:
+ snippet. */
+#define LLL_STUB_UNWIND_INFO_5 \
+LLL_STUB_UNWIND_INFO_START \
+"12:\t" ".byte 0x40 + (2b-1b) # DW_CFA_advance_loc\n\t" \
+LLL_STUB_UNWIND_INFO_END
+
+/* Unwind info for
+ 1: leaq ..., %rdi
+ 0: movq ..., %rdx
+ 2: subq $128, %rsp
+ 3: callq ...
+ 4: addq $128, %rsp
+ 5: jmp 24f
+ 6:
+ snippet. */
+#define LLL_STUB_UNWIND_INFO_6 \
+LLL_STUB_UNWIND_INFO_START \
+"12:\t" ".byte 0x40 + (0b-1b) # DW_CFA_advance_loc\n\t" \
+ ".byte 0x16 # DW_CFA_val_expression\n\t" \
+ ".uleb128 0x10\n\t" \
+ ".uleb128 26f-25f\n" \
+"25:\t" ".byte 0x80 # DW_OP_breg16\n\t" \
+ ".sleb128 4b-0b\n" \
+"26:\t" ".byte 0x40 + (2b-0b) # DW_CFA_advance_loc\n\t" \
+LLL_STUB_UNWIND_INFO_END
+
+
+#define lll_futex_wait(futex, val, private) \
+ lll_futex_timed_wait(futex, val, NULL, private)
+
+
+#define lll_futex_timed_wait(futex, val, timeout, private) \
+ ({ \
+ register const struct timespec *__to __asm__ ("r10") = timeout; \
+ int __status; \
register __typeof (val) _val __asm__ ("edx") = (val); \
- __asm__ __volatile ("xorq %%r10, %%r10\n\t" \
- "syscall" \
- : "=a" (__ignore) \
- : "0" (SYS_futex), "D" (futex), "S" (FUTEX_WAIT), \
- "d" (_val) \
- : "memory", "cc", "r10", "r11", "cx"); \
- } while (0)
+ __asm__ __volatile ("syscall" \
+ : "=a" (__status) \
+ : "0" (SYS_futex), "D" (futex), \
+ "S" (__lll_private_flag (FUTEX_WAIT, private)), \
+ "d" (_val), "r" (__to) \
+ : "memory", "cc", "r11", "cx"); \
+ __status; \
+ })
-#define lll_futex_wake(futex, nr) \
+#define lll_futex_wake(futex, nr, private) \
do { \
int __ignore; \
register __typeof (nr) _nr __asm__ ("edx") = (nr); \
__asm__ __volatile ("syscall" \
: "=a" (__ignore) \
- : "0" (SYS_futex), "D" (futex), "S" (FUTEX_WAKE), \
+ : "0" (SYS_futex), "D" (futex), \
+ "S" (__lll_private_flag (FUTEX_WAKE, private)), \
"d" (_nr) \
: "memory", "cc", "r10", "r11", "cx"); \
} while (0)
-/* Does not preserve %eax and %ecx. */
-extern int __lll_mutex_lock_wait (int *__futex, int __val) attribute_hidden;
-/* Does not preserver %eax, %ecx, and %edx. */
-extern int __lll_mutex_timedlock_wait (int *__futex, int __val,
- const struct timespec *__abstime)
- attribute_hidden;
-/* Preserves all registers but %eax. */
-extern int __lll_mutex_unlock_wait (int *__futex) attribute_hidden;
-
-
-/* NB: in the lll_mutex_trylock macro we simply return the value in %eax
+/* NB: in the lll_trylock macro we simply return the value in %eax
   after the cmpxchg instruction. In case the operation succeeded this
   value is zero. In case the operation failed, the cmpxchg instruction
   has loaded the current value of the memory word which is guaranteed
to be nonzero. */
-#define lll_mutex_trylock(futex) \
+#if defined NOT_IN_libc || defined UP
+# define __lll_trylock_asm LOCK_INSTR "cmpxchgl %2, %1"
+#else
+# define __lll_trylock_asm "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
+ "je 0f\n\t" \
+ "lock; cmpxchgl %2, %1\n\t" \
+ "jmp 1f\n\t" \
+ "0:\tcmpxchgl %2, %1\n\t" \
+ "1:"
+#endif
+
+#define lll_trylock(futex) \
({ int ret; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
+ __asm__ __volatile (__lll_trylock_asm \
: "=a" (ret), "=m" (futex) \
- : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
- "0" (LLL_MUTEX_LOCK_INITIALIZER) \
+ : "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex), \
+ "0" (LLL_LOCK_INITIALIZER) \
: "memory"); \
ret; })
-
-#define lll_mutex_cond_trylock(futex) \
+#define lll_robust_trylock(futex, id) \
({ int ret; \
__asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
: "=a" (ret), "=m" (futex) \
- : "r" (LLL_MUTEX_LOCK_INITIALIZER_WAITERS), \
- "m" (futex), "0" (LLL_MUTEX_LOCK_INITIALIZER) \
+ : "r" (id), "m" (futex), "0" (LLL_LOCK_INITIALIZER) \
: "memory"); \
ret; })
+#define lll_cond_trylock(futex) \
+ ({ int ret; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
+ : "=a" (ret), "=m" (futex) \
+ : "r" (LLL_LOCK_INITIALIZER_WAITERS), \
+ "m" (futex), "0" (LLL_LOCK_INITIALIZER) \
+ : "memory"); \
+ ret; })
-#define lll_mutex_lock(futex) \
- (void) ({ int ignore1, ignore2, ignore3; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \
- "jnz 1f\n\t" \
- ".subsection 1\n" \
- "1:\tleaq %2, %%rdi\n\t" \
- "subq $128, %%rsp\n\t" \
- "callq __lll_mutex_lock_wait\n\t" \
- "addq $128, %%rsp\n\t" \
- "jmp 2f\n\t" \
- ".previous\n" \
- "2:" \
- : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\
- "=a" (ignore3) \
- : "0" (1), "m" (futex), "3" (0) \
- : "cx", "r11", "cc", "memory"); })
-
-
-#define lll_mutex_cond_lock(futex) \
- (void) ({ int ignore1, ignore2, ignore3; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \
+#if defined NOT_IN_libc || defined UP
+# define __lll_lock_asm_start LOCK_INSTR "cmpxchgl %4, %2\n\t" \
+ "jnz 1f\n\t"
+#else
+# define __lll_lock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
+ "je 0f\n\t" \
+ "lock; cmpxchgl %4, %2\n\t" \
"jnz 1f\n\t" \
- ".subsection 1\n" \
- "1:\tleaq %2, %%rdi\n\t" \
- "subq $128, %%rsp\n\t" \
- "callq __lll_mutex_lock_wait\n\t" \
- "addq $128, %%rsp\n\t" \
- "jmp 2f\n\t" \
- ".previous\n" \
- "2:" \
- : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\
- "=a" (ignore3) \
- : "0" (2), "m" (futex), "3" (0) \
- : "cx", "r11", "cc", "memory"); })
-
-
-#define lll_mutex_timedlock(futex, timeout) \
- ({ int _result, ignore1, ignore2, ignore3; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %4\n\t" \
+ "jmp 24f\n" \
+ "0:\tcmpxchgl %4, %2\n\t" \
+ "jnz 1f\n\t"
+#endif
+
+#define lll_lock(futex, private) \
+ (void) \
+ ({ int ignore1, ignore2, ignore3; \
+ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
+ __asm__ __volatile (__lll_lock_asm_start \
+ ".subsection 1\n\t" \
+ ".type _L_lock_%=, @function\n" \
+ "_L_lock_%=:\n" \
+ "1:\tleaq %2, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_lock_wait_private\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_lock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=S" (ignore1), "=&D" (ignore2), "=m" (futex), \
+ "=a" (ignore3) \
+ : "0" (1), "m" (futex), "3" (0) \
+ : "cx", "r11", "cc", "memory"); \
+ else \
+ __asm__ __volatile (__lll_lock_asm_start \
+ ".subsection 1\n\t" \
+ ".type _L_lock_%=, @function\n" \
+ "_L_lock_%=:\n" \
+ "1:\tleaq %2, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_lock_wait\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_lock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
+ "=a" (ignore3) \
+ : "1" (1), "m" (futex), "3" (0), "0" (private) \
+ : "cx", "r11", "cc", "memory"); \
+ }) \
+
+#define lll_robust_lock(futex, id, private) \
+ ({ int result, ignore1, ignore2; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
+ "jnz 1f\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_robust_lock_%=, @function\n" \
+ "_L_robust_lock_%=:\n" \
+ "1:\tleaq %2, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_robust_lock_wait\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_robust_lock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
+ "=a" (result) \
+ : "1" (id), "m" (futex), "3" (0), "0" (private) \
+ : "c