summary refs log tree commit diff
path: root/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64
diff options
context:
space:
mode:
Diffstat (limited to 'libpthread/nptl/sysdeps/unix/sysv/linux/x86_64')
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Makefile | 13
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Makefile.arch | 78
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/pthreadtypes.h | 225
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/semaphore.h | 41
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/cancellation.S | 116
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/clone.S | 3
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/compat-timer.h | 46
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/fork.c | 31
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S | 22
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S | 20
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S | 22
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S | 457
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h | 598
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S | 306
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pt-__syscall_error.c | 1
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pt-vfork.S | 33
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S | 161
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S | 177
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S | 160
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S | 803
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S | 486
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S | 189
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S | 179
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S | 276
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S | 268
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S | 130
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S | 167
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_setaffinity.c | 14
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_spin_init.c | 1
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_spin_unlock.S | 1
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S | 89
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S | 379
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S | 52
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S | 174
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sysdep-cancel.h | 111
-rw-r--r-- | libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/vfork.S | 43
36 files changed, 5872 insertions, 0 deletions
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Makefile b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Makefile
new file mode 100644
index 000000000..43a6fad84
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Makefile
@@ -0,0 +1,13 @@
+# Makefile for uClibc NPTL
+#
+# Copyright (C) 2005 Steven J. Hill <sjhill@uclibc.org>
+#
+# Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
+#
+
+top_srcdir=../../../../../../../
+top_builddir=../../../../../../../
+all: objs
+include $(top_builddir)Rules.mak
+include Makefile.arch
+include $(top_srcdir)Makerules
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Makefile.arch b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Makefile.arch
new file mode 100644
index 000000000..53a87728f
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Makefile.arch
@@ -0,0 +1,78 @@
+# Makefile for uClibc NPTL
+#
+# Copyright (C) 2006 Steven J. Hill <sjhill@uclibc.org>
+#
+# Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
+#
+LINUX_ARCH_DIR:=$(top_srcdir)libpthread/nptl/sysdeps/unix/sysv/linux/x86_64
+LINUX_ARCH_OUT:=$(top_builddir)libpthread/nptl/sysdeps/unix/sysv/linux/x86_64
+
+
+libpthread_SSRC = pt-vfork.S clone.S pthread_once.S
+libpthread_CSRC = pthread_spin_init.c pt-__syscall_error.c
+
+libc_a_CSRC = fork.c
+libc_a_SSRC = clone.S vfork.S
+
+libpthread_SSRC += lowlevellock.S pthread_barrier_wait.S pthread_cond_signal.S pthread_cond_broadcast.S \
+ sem_post.S sem_timedwait.S lowlevelrobustlock.S \
+ sem_trywait.S sem_wait.S pthread_rwlock_rdlock.S pthread_rwlock_wrlock.S \
+ pthread_rwlock_timedrdlock.S pthread_rwlock_timedwrlock.S pthread_rwlock_unlock.S \
+ pthread_spin_unlock.S
+# pthread_cond_timedwait.S pthread_cond_wait.S
+libc_a_SSRC += libc-lowlevellock.S
+
+
+CFLAGS-OMIT-fork.c = -DNOT_IN_libc=1 -DIS_IN_libpthread=1
+CFLAGS-pt-__syscall_error.c = -DNOT_IN_libc=1 -DIS_IN_libpthread=1
+
+ifeq ($(UCLIBC_HAS_STDIO_FUTEXES),y)
+CFLAGS-fork.c = -D__USE_STDIO_FUTEXES__
+endif
+
+ASFLAGS-pt-vfork.S = -DNOT_IN_libc=1 -DIS_IN_libpthread=1 -D_LIBC_REENTRANT -DUSE___THREAD
+ASFLAGS-lowlevellock.S = -DNOT_IN_libc=1 -DIS_IN_libpthread=1 -D_LIBC_REENTRANT -DUSE___THREAD
+ASFLAGS-pthread_once.S = -DNOT_IN_libc=1 -DIS_IN_libpthread=1 -D_LIBC_REENTRANT -DUSE___THREAD
+
+
+ASFLAGS-clone.S = -D_LIBC_REENTRANT
+ASFLAGS-vfork.S = -D_LIBC_REENTRANT
+ASFLAGS-libc-lowlevellock.S = -D_LIBC_REENTRANT
+
+ifeq ($(UCLIBC_HAS_THREADS_NATIVE),y)
+#Needed to use the correct SYSCALL_ERROR_HANDLER
+ASFLAGS-clone.S += -DUSE___THREAD
+ASFLAGS-vfork.S += -DUSE___THREAD
+ASFLAGS-sem_wait.S += -DUSE___THREAD
+ASFLAGS-sem_trywait.S += -DUSE___THREAD
+ASFLAGS-sem_timedwait.S += -DUSE___THREAD
+ASFLAGS-sem_post.S += -DUSE___THREAD
+endif
+
+CFLAGS += $(SSP_ALL_CFLAGS)
+#CFLAGS:=$(CFLAGS:-O1=-O2)
+
+LINUX_ARCH_OBJ:=$(patsubst %.S,$(LINUX_ARCH_OUT)/%.o,$(libpthread_SSRC))
+LINUX_ARCH_OBJ+=$(patsubst %.c,$(LINUX_ARCH_OUT)/%.o,$(libpthread_CSRC))
+
+ifeq ($(DOPIC),y)
+libpthread-a-y += $(LINUX_ARCH_OBJ:.o=.os)
+else
+libpthread-a-y += $(LINUX_ARCH_OBJ)
+endif
+libpthread-so-y += $(LINUX_ARCH_OBJ:.o=.oS)
+
+libpthread-nomulti-y+=$(LINUX_ARCH_OBJS)
+
+LIBC_LINUX_ARCH_OBJ:=$(patsubst %.c,$(LINUX_ARCH_OUT)/%.o,$(libc_a_CSRC))
+LIBC_LINUX_ARCH_OBJ+=$(patsubst %.S,$(LINUX_ARCH_OUT)/%.o,$(libc_a_SSRC))
+
+libc-static-y+=$(LIBC_LINUX_ARCH_OBJ)
+libc-shared-y+=$(LIBC_LINUX_ARCH_OBJ:.o=.oS)
+
+libc-nomulti-y+=$(LIBC_LINUX_ARCH_OBJ)
+
+objclean-y+=nptl_linux_arch_clean
+
+nptl_linux_arch_clean:
+ $(do_rm) $(addprefix $(LINUX_ARCH_OUT)/*., o os oS)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/pthreadtypes.h b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/pthreadtypes.h
new file mode 100644
index 000000000..7a09c8119
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/pthreadtypes.h
@@ -0,0 +1,225 @@
+/* Copyright (C) 2002,2003,2004,2005,2006,2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#ifndef _BITS_PTHREADTYPES_H
+#define _BITS_PTHREADTYPES_H 1
+
+#include <bits/wordsize.h>
+
+#if __WORDSIZE == 64
+# define __SIZEOF_PTHREAD_ATTR_T 56
+# define __SIZEOF_PTHREAD_MUTEX_T 40
+# define __SIZEOF_PTHREAD_MUTEXATTR_T 4
+# define __SIZEOF_PTHREAD_COND_T 48
+# define __SIZEOF_PTHREAD_CONDATTR_T 4
+# define __SIZEOF_PTHREAD_RWLOCK_T 56
+# define __SIZEOF_PTHREAD_RWLOCKATTR_T 8
+# define __SIZEOF_PTHREAD_BARRIER_T 32
+# define __SIZEOF_PTHREAD_BARRIERATTR_T 4
+#else
+# define __SIZEOF_PTHREAD_ATTR_T 36
+# define __SIZEOF_PTHREAD_MUTEX_T 24
+# define __SIZEOF_PTHREAD_MUTEXATTR_T 4
+# define __SIZEOF_PTHREAD_COND_T 48
+# define __SIZEOF_PTHREAD_CONDATTR_T 4
+# define __SIZEOF_PTHREAD_RWLOCK_T 32
+# define __SIZEOF_PTHREAD_RWLOCKATTR_T 8
+# define __SIZEOF_PTHREAD_BARRIER_T 20
+# define __SIZEOF_PTHREAD_BARRIERATTR_T 4
+#endif
+
+
+/* Thread identifiers. The structure of the attribute type is not
+ exposed on purpose. */
+typedef unsigned long int pthread_t;
+
+
+typedef union
+{
+ char __size[__SIZEOF_PTHREAD_ATTR_T];
+ long int __align;
+} pthread_attr_t;
+
+
+#if __WORDSIZE == 64
+typedef struct __pthread_internal_list
+{
+ struct __pthread_internal_list *__prev;
+ struct __pthread_internal_list *__next;
+} __pthread_list_t;
+#else
+typedef struct __pthread_internal_slist
+{
+ struct __pthread_internal_slist *__next;
+} __pthread_slist_t;
+#endif
+
+
+/* Data structures for mutex handling. The structure of the attribute
+ type is not exposed on purpose. */
+typedef union
+{
+ struct __pthread_mutex_s
+ {
+ int __lock;
+ unsigned int __count;
+ int __owner;
+#if __WORDSIZE == 64
+ unsigned int __nusers;
+#endif
+ /* KIND must stay at this position in the structure to maintain
+ binary compatibility. */
+ int __kind;
+#if __WORDSIZE == 64
+ int __spins;
+ __pthread_list_t __list;
+# define __PTHREAD_MUTEX_HAVE_PREV 1
+#else
+ unsigned int __nusers;
+ __extension__ union
+ {
+ int __spins;
+ __pthread_slist_t __list;
+ };
+#endif
+ } __data;
+ char __size[__SIZEOF_PTHREAD_MUTEX_T];
+ long int __align;
+} pthread_mutex_t;
+
+typedef union
+{
+ char __size[__SIZEOF_PTHREAD_MUTEXATTR_T];
+ int __align;
+} pthread_mutexattr_t;
+
+
+/* Data structure for conditional variable handling. The structure of
+ the attribute type is not exposed on purpose. */
+typedef union
+{
+ struct
+ {
+ int __lock;
+ unsigned int __futex;
+ __extension__ unsigned long long int __total_seq;
+ __extension__ unsigned long long int __wakeup_seq;
+ __extension__ unsigned long long int __woken_seq;
+ void *__mutex;
+ unsigned int __nwaiters;
+ unsigned int __broadcast_seq;
+ } __data;
+ char __size[__SIZEOF_PTHREAD_COND_T];
+ __extension__ long long int __align;
+} pthread_cond_t;
+
+typedef union
+{
+ char __size[__SIZEOF_PTHREAD_CONDATTR_T];
+ int __align;
+} pthread_condattr_t;
+
+
+/* Keys for thread-specific data */
+typedef unsigned int pthread_key_t;
+
+
+/* Once-only execution */
+typedef int pthread_once_t;
+
+
+#if defined __USE_UNIX98 || defined __USE_XOPEN2K
+/* Data structure for read-write lock variable handling. The
+ structure of the attribute type is not exposed on purpose. */
+typedef union
+{
+# if __WORDSIZE == 64
+ struct
+ {
+ int __lock;
+ unsigned int __nr_readers;
+ unsigned int __readers_wakeup;
+ unsigned int __writer_wakeup;
+ unsigned int __nr_readers_queued;
+ unsigned int __nr_writers_queued;
+ int __writer;
+ int __shared;
+ unsigned long int __pad1;
+ unsigned long int __pad2;
+ /* FLAGS must stay at this position in the structure to maintain
+ binary compatibility. */
+ unsigned int __flags;
+ } __data;
+# else
+ struct
+ {
+ int __lock;
+ unsigned int __nr_readers;
+ unsigned int __readers_wakeup;
+ unsigned int __writer_wakeup;
+ unsigned int __nr_readers_queued;
+ unsigned int __nr_writers_queued;
+ /* FLAGS must stay at this position in the structure to maintain
+ binary compatibility. */
+ unsigned char __flags;
+ unsigned char __shared;
+ unsigned char __pad1;
+ unsigned char __pad2;
+ int __writer;
+ } __data;
+# endif
+ char __size[__SIZEOF_PTHREAD_RWLOCK_T];
+ long int __align;
+} pthread_rwlock_t;
+
+typedef union
+{
+ char __size[__SIZEOF_PTHREAD_RWLOCKATTR_T];
+ long int __align;
+} pthread_rwlockattr_t;
+#endif
+
+
+#ifdef __USE_XOPEN2K
+/* POSIX spinlock data type. */
+typedef volatile int pthread_spinlock_t;
+
+
+/* POSIX barriers data type. The structure of the type is
+ deliberately not exposed. */
+typedef union
+{
+ char __size[__SIZEOF_PTHREAD_BARRIER_T];
+ long int __align;
+} pthread_barrier_t;
+
+typedef union
+{
+ char __size[__SIZEOF_PTHREAD_BARRIERATTR_T];
+ int __align;
+} pthread_barrierattr_t;
+#endif
+
+
+#if __WORDSIZE == 32
+/* Extra attributes for the cleanup functions. */
+# define __cleanup_fct_attribute __attribute__ ((__regparm__ (1)))
+#endif
+
+#endif /* bits/pthreadtypes.h */
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/semaphore.h b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/semaphore.h
new file mode 100644
index 000000000..e973bc5bf
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/semaphore.h
@@ -0,0 +1,41 @@
+/* Copyright (C) 2002, 2004 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#ifndef _SEMAPHORE_H
+# error "Never use <bits/semaphore.h> directly; include <semaphore.h> instead."
+#endif
+
+#include <bits/wordsize.h>
+
+#if __WORDSIZE == 64
+# define __SIZEOF_SEM_T 32
+#else
+# define __SIZEOF_SEM_T 16
+#endif
+
+
+/* Value returned if `sem_open' failed. */
+#define SEM_FAILED ((sem_t *) 0)
+
+
+typedef union
+{
+ char __size[__SIZEOF_SEM_T];
+ long int __align;
+} sem_t;
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/cancellation.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/cancellation.S
new file mode 100644
index 000000000..680696200
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/cancellation.S
@@ -0,0 +1,116 @@
+/* Copyright (C) 2009 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2009.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <tcb-offsets.h>
+#include <kernel-features.h>
+#include "lowlevellock.h"
+
+#ifdef IS_IN_libpthread
+# ifdef SHARED
+# define __pthread_unwind __GI___pthread_unwind
+# endif
+#else
+# ifndef SHARED
+ .weak __pthread_unwind
+# endif
+#endif
+
+
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl %fs:PRIVATE_FUTEX, reg
+# else
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT, reg
+# endif
+#endif
+
+/* It is crucial that the functions in this file don't modify registers
+ other than %rax and %r11. The syscall wrapper code depends on this
+ because it doesn't explicitly save the other registers which hold
+ relevant values. */
+ .text
+
+ .hidden __pthread_enable_asynccancel
+ENTRY(__pthread_enable_asynccancel)
+ movl %fs:CANCELHANDLING, %eax
+2: movl %eax, %r11d
+ orl $TCB_CANCELTYPE_BITMASK, %r11d
+ cmpl %eax, %r11d
+ je 1f
+
+ lock
+ cmpxchgl %r11d, %fs:CANCELHANDLING
+ jnz 2b
+
+ andl $(TCB_CANCELSTATE_BITMASK|TCB_CANCELTYPE_BITMASK|TCB_CANCELED_BITMASK|TCB_EXITING_BITMASK|TCB_CANCEL_RESTMASK|TCB_TERMINATED_BITMASK), %r11d
+ cmpl $(TCB_CANCELTYPE_BITMASK|TCB_CANCELED_BITMASK), %r11d
+ je 3f
+
+1: ret
+
+3: movq $TCB_PTHREAD_CANCELED, %fs:RESULT
+ lock
+ orl $TCB_EXITING_BITMASK, %fs:CANCELHANDLING
+ movq %fs:CLEANUP_JMP_BUF, %rdi
+#ifdef SHARED
+ call __pthread_unwind@PLT
+#else
+ call __pthread_unwind
+#endif
+ hlt
+END(__pthread_enable_asynccancel)
+
+
+ .hidden __pthread_disable_asynccancel
+ENTRY(__pthread_disable_asynccancel)
+ testl $TCB_CANCELTYPE_BITMASK, %edi
+ jnz 1f
+
+ movl %fs:CANCELHANDLING, %eax
+2: movl %eax, %r11d
+ andl $~TCB_CANCELTYPE_BITMASK, %r11d
+ lock
+ cmpxchgl %r11d, %fs:CANCELHANDLING
+ jnz 2b
+
+ movl %r11d, %eax
+3: andl $(TCB_CANCELING_BITMASK|TCB_CANCELED_BITMASK), %eax
+ cmpl $TCB_CANCELING_BITMASK, %eax
+ je 4f
+1: ret
+
+ /* Performance doesn't matter in this loop. We will
+ delay until the thread is canceled. And we will unlikely
+ enter the loop twice. */
+4: movq %fs:0, %rdi
+ movl $__NR_futex, %eax
+ xorq %r10, %r10
+ addq $CANCELHANDLING, %rdi
+ LOAD_PRIVATE_FUTEX_WAIT (%esi)
+ syscall
+ movl %fs:CANCELHANDLING, %eax
+ jmp 3b
+END(__pthread_disable_asynccancel)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/clone.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/clone.S
new file mode 100644
index 000000000..efbaee3d1
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/clone.S
@@ -0,0 +1,3 @@
+#include <tcb-offsets.h>
+#define RESET_PID
+#include <libc/sysdeps/linux/x86_64/clone.S>
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/compat-timer.h b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/compat-timer.h
new file mode 100644
index 000000000..02485daa5
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/compat-timer.h
@@ -0,0 +1,46 @@
+/* Copyright (C) 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If not,
+ write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+#include <signal.h>
+#include <time.h>
+#include <sys/types.h>
+
+#define OLD_TIMER_MAX 256
+
+extern timer_t __compat_timer_list[OLD_TIMER_MAX] attribute_hidden;
+
+
+extern int __timer_create_new (clockid_t clock_id, struct sigevent *evp,
+ timer_t *timerid);
+extern int __timer_delete_new (timer_t timerid);
+extern int __timer_getoverrun_new (timer_t timerid);
+extern int __timer_gettime_new (timer_t timerid, struct itimerspec *value);
+extern int __timer_settime_new (timer_t timerid, int flags,
+ const struct itimerspec *value,
+ struct itimerspec *ovalue);
+
+
+extern int __timer_create_old (clockid_t clock_id, struct sigevent *evp,
+ int *timerid);
+extern int __timer_delete_old (int timerid);
+extern int __timer_getoverrun_old (int timerid);
+extern int __timer_gettime_old (int timerid, struct itimerspec *value);
+extern int __timer_settime_old (int timerid, int flags,
+ const struct itimerspec *value,
+ struct itimerspec *ovalue);
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/fork.c b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/fork.c
new file mode 100644
index 000000000..c828e158d
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/fork.c
@@ -0,0 +1,31 @@
+/* Copyright (C) 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sched.h>
+#include <signal.h>
+#include <sysdep.h>
+#include <tls.h>
+
+
+#define ARCH_FORK() \
+ INLINE_SYSCALL (clone, 4, \
+ CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID | SIGCHLD, 0, \
+ NULL, &THREAD_SELF->tid)
+
+#include "../fork.c"
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S
new file mode 100644
index 000000000..110058850
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S
@@ -0,0 +1,22 @@
+/* Copyright (C) 2009 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2009.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#define __pthread_enable_asynccancel __libc_enable_asynccancel
+#define __pthread_disable_asynccancel __libc_disable_asynccancel
+#include "cancellation.S"
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S
new file mode 100644
index 000000000..ce8ad27aa
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S
@@ -0,0 +1,20 @@
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include "lowlevellock.S"
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S
new file mode 100644
index 000000000..ce4192b5d
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S
@@ -0,0 +1,22 @@
+/* Copyright (C) 2009 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2009.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#define __pthread_enable_asynccancel __librt_enable_asynccancel
+#define __pthread_disable_asynccancel __librt_disable_asynccancel
+#include "cancellation.S"
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
new file mode 100644
index 000000000..f87532359
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
@@ -0,0 +1,457 @@
+/* Copyright (C) 2002-2006, 2007, 2009 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <pthread-errnos.h>
+#include <bits/kernel-features.h>
+#include <lowlevellock.h>
+#include <tcb-offsets.h>
+
+ .text
+
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+ movl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+ xorl $(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME), reg
+# define LOAD_FUTEX_WAKE(reg) \
+ xorl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl %fs:PRIVATE_FUTEX, reg
+# else
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT, reg
+# endif
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+ movl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAKE, reg
+# if FUTEX_WAIT == 0
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg
+# else
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT, reg
+# endif
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, reg
+# define LOAD_FUTEX_WAKE(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAKE, reg
+#endif
+
+
+/* For the calculation see asm/vsyscall.h. */
+#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
+
+
+ .globl __lll_lock_wait_private
+ .type __lll_lock_wait_private,@function
+ .hidden __lll_lock_wait_private
+ .align 16
+__lll_lock_wait_private:
+ cfi_startproc
+ pushq %r10
+ cfi_adjust_cfa_offset(8)
+ pushq %rdx
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%r10, -16)
+ cfi_offset(%rdx, -24)
+ xorq %r10, %r10 /* No timeout. */
+ movl $2, %edx
+ LOAD_PRIVATE_FUTEX_WAIT (%esi)
+
+ cmpl %edx, %eax /* NB: %edx == 2 */
+ jne 2f
+
+1: movl $SYS_futex, %eax
+ syscall
+
+2: movl %edx, %eax
+ xchgl %eax, (%rdi) /* NB: lock is implied */
+
+ testl %eax, %eax
+ jnz 1b
+
+ popq %rdx
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%rdx)
+ popq %r10
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r10)
+ retq
+ cfi_endproc
+ .size __lll_lock_wait_private,.-__lll_lock_wait_private
+
+#ifdef NOT_IN_libc
+ .globl __lll_lock_wait
+ .type __lll_lock_wait,@function
+ .hidden __lll_lock_wait
+ .align 16
+__lll_lock_wait:
+ cfi_startproc
+ pushq %r10
+ cfi_adjust_cfa_offset(8)
+ pushq %rdx
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%r10, -16)
+ cfi_offset(%rdx, -24)
+ xorq %r10, %r10 /* No timeout. */
+ movl $2, %edx
+ LOAD_FUTEX_WAIT (%esi)
+
+ cmpl %edx, %eax /* NB: %edx == 2 */
+ jne 2f
+
+1: movl $SYS_futex, %eax
+ syscall
+
+2: movl %edx, %eax
+ xchgl %eax, (%rdi) /* NB: lock is implied */
+
+ testl %eax, %eax
+ jnz 1b
+
+ popq %rdx
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%rdx)
+ popq %r10
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r10)
+ retq
+ cfi_endproc
+ .size __lll_lock_wait,.-__lll_lock_wait
+
+ /* %rdi: futex
+ %rsi: flags
+ %rdx: timeout
+ %eax: futex value
+ */
+ /* Block until the futex at %rdi can be acquired (set to 2) or the
+    absolute timeout *%rdx expires.  Returns 0 in %eax on success,
+    ETIMEDOUT or EINVAL on failure.  */
+ .globl __lll_timedlock_wait
+ .type __lll_timedlock_wait,@function
+ .hidden __lll_timedlock_wait
+ .align 16
+__lll_timedlock_wait:
+ cfi_startproc
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+ /* If the running kernel does not support FUTEX_CLOCK_REALTIME,
+    fall back to the relative-timeout emulation below.  */
+# ifdef __PIC__
+ cmpl $0, __have_futex_clock_realtime(%rip)
+# else
+ cmpl $0, __have_futex_clock_realtime
+# endif
+ je .Lreltmo
+# endif
+
+ /* Fast path: FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME accepts the
+    absolute timespec directly, so no gettimeofday loop is needed.  */
+ pushq %r9
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r9, 0)
+ movq %rdx, %r10
+ /* %r9 = FUTEX_BITSET_MATCH_ANY for the FUTEX_WAIT_BITSET call.  */
+ movl $0xffffffff, %r9d
+ LOAD_FUTEX_WAIT_ABS (%esi)
+
+ /* If the futex already holds 2 ("locked with waiters") go straight
+    to the wait; otherwise mark it 2 first via the xchg at 2:.  */
+ movl $2, %edx
+ cmpl %edx, %eax
+ jne 2f
+
+1: movl $SYS_futex, %eax
+ movl $2, %edx
+ syscall
+
+2: xchgl %edx, (%rdi) /* NB: lock is implied */
+
+ /* Old value 0 means we acquired the lock.  */
+ testl %edx, %edx
+ jz 3f
+
+ cmpl $-ETIMEDOUT, %eax
+ je 4f
+ cmpl $-EINVAL, %eax
+ jne 1b
+4: movl %eax, %edx
+ negl %edx
+
+3: movl %edx, %eax
+ popq %r9
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r9)
+ retq
+
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+.Lreltmo:
+ /* Check for a valid timeout value. */
+ cmpq $1000000000, 8(%rdx)
+ jae 3f
+
+ pushq %r8
+ cfi_adjust_cfa_offset(8)
+ pushq %r9
+ cfi_adjust_cfa_offset(8)
+ pushq %r12
+ cfi_adjust_cfa_offset(8)
+ pushq %r13
+ cfi_adjust_cfa_offset(8)
+ pushq %r14
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%r8, -16)
+ cfi_offset(%r9, -24)
+ cfi_offset(%r12, -32)
+ cfi_offset(%r13, -40)
+ cfi_offset(%r14, -48)
+ /* Save the flags; reloaded as 24(%rsp) before each futex call.  */
+ pushq %rsi
+ cfi_adjust_cfa_offset(8)
+
+ /* Stack frame for the timespec and timeval structs. */
+ subq $24, %rsp
+ cfi_adjust_cfa_offset(24)
+
+ movq %rdi, %r12
+ movq %rdx, %r13
+
+ movl $2, %edx
+ xchgl %edx, (%r12)
+
+ testl %edx, %edx
+ je 6f
+
+1:
+ /* Get current time. */
+ movq %rsp, %rdi
+ xorl %esi, %esi
+ movq $VSYSCALL_ADDR_vgettimeofday, %rax
+ /* This is a regular function call, all caller-save registers
+ might be clobbered. */
+ callq *%rax
+
+ /* Compute relative timeout. */
+ movq 8(%rsp), %rax
+ movl $1000, %edi
+ mul %rdi /* Milli seconds to nano seconds. */
+ movq (%r13), %rdi
+ movq 8(%r13), %rsi
+ subq (%rsp), %rdi
+ subq %rax, %rsi
+ jns 4f
+ addq $1000000000, %rsi
+ decq %rdi
+4: testq %rdi, %rdi
+ js 2f /* Time is already up. */
+
+ /* Store relative timeout. */
+ movq %rdi, (%rsp)
+ movq %rsi, 8(%rsp)
+
+ /* Futex call. */
+ movl $2, %edx
+ /* NOTE(review): this %eax load is overwritten by the SYS_futex
+    load below before any use; it looks dead but is kept verbatim.  */
+ movl $1, %eax
+ movq %rsp, %r10
+ movl 24(%rsp), %esi
+ LOAD_FUTEX_WAIT (%esi)
+ movq %r12, %rdi
+ movl $SYS_futex, %eax
+ syscall
+
+ /* NB: %edx == 2 */
+ xchgl %edx, (%r12)
+
+ testl %edx, %edx
+ je 6f
+
+ cmpl $-ETIMEDOUT, %eax
+ jne 1b
+2: movl $ETIMEDOUT, %edx
+
+6: addq $32, %rsp
+ cfi_adjust_cfa_offset(-32)
+ popq %r14
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r14)
+ popq %r13
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r13)
+ popq %r12
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r12)
+ popq %r9
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r9)
+ popq %r8
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r8)
+ movl %edx, %eax
+ retq
+
+ /* Invalid timeout (tv_nsec out of range).  */
+3: movl $EINVAL, %eax
+ retq
+# endif
+ cfi_endproc
+ .size __lll_timedlock_wait,.-__lll_timedlock_wait
+#endif
+
+
+ /* Release the process-private lock at %rdi (store 0) and wake one
+    waiter via FUTEX_WAKE.  Preserves %rsi and %rdx for the inline
+    callers in lowlevellock.h.  */
+ .globl __lll_unlock_wake_private
+ .type __lll_unlock_wake_private,@function
+ .hidden __lll_unlock_wake_private
+ .align 16
+__lll_unlock_wake_private:
+ cfi_startproc
+ pushq %rsi
+ cfi_adjust_cfa_offset(8)
+ pushq %rdx
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%rsi, -16)
+ cfi_offset(%rdx, -24)
+
+ movl $0, (%rdi)
+ LOAD_PRIVATE_FUTEX_WAKE (%esi)
+ movl $1, %edx /* Wake one thread. */
+ movl $SYS_futex, %eax
+ syscall
+
+ popq %rdx
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%rdx)
+ popq %rsi
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%rsi)
+ retq
+ cfi_endproc
+ .size __lll_unlock_wake_private,.-__lll_unlock_wake_private
+
+#ifdef NOT_IN_libc
+ /* Like __lll_unlock_wake_private, but %esi carries the private
+    flag from the caller, so the futex may be shared or private.  */
+ .globl __lll_unlock_wake
+ .type __lll_unlock_wake,@function
+ .hidden __lll_unlock_wake
+ .align 16
+__lll_unlock_wake:
+ cfi_startproc
+ pushq %rsi
+ cfi_adjust_cfa_offset(8)
+ pushq %rdx
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%rsi, -16)
+ cfi_offset(%rdx, -24)
+
+ movl $0, (%rdi)
+ LOAD_FUTEX_WAKE (%esi)
+ movl $1, %edx /* Wake one thread. */
+ movl $SYS_futex, %eax
+ syscall
+
+ popq %rdx
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%rdx)
+ popq %rsi
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%rsi)
+ retq
+ cfi_endproc
+ .size __lll_unlock_wake,.-__lll_unlock_wake
+
+ /* Wait until *%rdi (a CLONE_CHILD_CLEARTID location, see
+    lll_timedwait_tid) becomes zero or the absolute timeout *%rsi
+    expires.  Returns 0 in %eax on success, ETIMEDOUT otherwise.
+    Emulates the absolute timeout with a gettimeofday loop.  */
+ .globl __lll_timedwait_tid
+ .type __lll_timedwait_tid,@function
+ .hidden __lll_timedwait_tid
+ .align 16
+__lll_timedwait_tid:
+ cfi_startproc
+ pushq %r12
+ cfi_adjust_cfa_offset(8)
+ pushq %r13
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%r12, -16)
+ cfi_offset(%r13, -24)
+
+ movq %rdi, %r12
+ movq %rsi, %r13
+
+ /* Scratch space for a struct timeval / relative timespec.  */
+ subq $16, %rsp
+ cfi_adjust_cfa_offset(16)
+
+ /* Get current time. */
+2: movq %rsp, %rdi
+ xorl %esi, %esi
+ movq $VSYSCALL_ADDR_vgettimeofday, %rax
+ callq *%rax
+
+ /* Compute relative timeout. */
+ movq 8(%rsp), %rax
+ movl $1000, %edi
+ mul %rdi /* Milli seconds to nano seconds. */
+ movq (%r13), %rdi
+ movq 8(%r13), %rsi
+ subq (%rsp), %rdi
+ subq %rax, %rsi
+ jns 5f
+ addq $1000000000, %rsi
+ decq %rdi
+5: testq %rdi, %rdi
+ js 6f /* Time is already up. */
+
+ movq %rdi, (%rsp) /* Store relative timeout. */
+ movq %rsi, 8(%rsp)
+
+ /* TID already cleared means the thread has exited.  */
+ movl (%r12), %edx
+ testl %edx, %edx
+ jz 4f
+
+ movq %rsp, %r10
+ /* XXX The kernel so far uses global futex for the wakeup at
+ all times. */
+#if FUTEX_WAIT == 0
+ xorl %esi, %esi
+#else
+ movl $FUTEX_WAIT, %esi
+#endif
+ movq %r12, %rdi
+ movl $SYS_futex, %eax
+ syscall
+
+ cmpl $0, (%rdi)
+ jne 1f
+4: xorl %eax, %eax
+
+8: addq $16, %rsp
+ cfi_adjust_cfa_offset(-16)
+ popq %r13
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r13)
+ popq %r12
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r12)
+ retq
+
+ /* Re-establish the CFA for the out-of-line error path.  */
+ cfi_adjust_cfa_offset(32)
+1: cmpq $-ETIMEDOUT, %rax
+ jne 2b
+
+6: movl $ETIMEDOUT, %eax
+ jmp 8b
+ cfi_endproc
+ .size __lll_timedwait_tid,.-__lll_timedwait_tid
+#endif
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
new file mode 100644
index 000000000..7c042fc80
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
@@ -0,0 +1,598 @@
+/* Copyright (C) 2002-2004, 2006-2008, 2009 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#ifndef _LOWLEVELLOCK_H
+#define _LOWLEVELLOCK_H 1
+
+#ifndef __ASSEMBLER__
+# include <time.h>
+# include <sys/param.h>
+# include <bits/pthreadtypes.h>
+# include <bits/kernel-features.h>
+# include <tcb-offsets.h>
+
+/* Bus-lock prefix for atomic instructions: elided on uniprocessor
+   (UP) builds, where a single CPU makes the lock prefix unnecessary.
+   LOCK_INSTR is the C/inline-asm spelling, LOCK the assembler one.  */
+# ifndef LOCK_INSTR
+# ifdef UP
+# define LOCK_INSTR /* nothing */
+# else
+# define LOCK_INSTR "lock;"
+# endif
+# endif
+#else
+# ifndef LOCK
+# ifdef UP
+# define LOCK
+# else
+# define LOCK lock
+# endif
+# endif
+#endif
+
+/* Futex operation codes and flag bits, mirroring <linux/futex.h>.  */
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+#define FUTEX_CMP_REQUEUE 4
+#define FUTEX_WAKE_OP 5
+#define FUTEX_LOCK_PI 6
+#define FUTEX_UNLOCK_PI 7
+#define FUTEX_TRYLOCK_PI 8
+#define FUTEX_WAIT_BITSET 9
+#define FUTEX_WAKE_BITSET 10
+#define FUTEX_WAIT_REQUEUE_PI 11
+#define FUTEX_CMP_REQUEUE_PI 12
+#define FUTEX_PRIVATE_FLAG 128
+#define FUTEX_CLOCK_REALTIME 256
+
+/* Bitset argument matching every waiter, for FUTEX_*_BITSET ops.  */
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
+
+/* Encoded FUTEX_WAKE_OP operation used by the condvar code.  */
+#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
+
+/* Values for 'private' parameter of locking macros. Yes, the
+ definition seems to be backwards. But it is not. The bit will be
+ reversed before passing to the system call. */
+#define LLL_PRIVATE 0
+#define LLL_SHARED FUTEX_PRIVATE_FLAG
+
+#ifndef __ASSEMBLER__
+
+#if !defined NOT_IN_libc || defined IS_IN_rtld
+/* In libc.so or ld.so all futexes are private. */
+# ifdef __ASSUME_PRIVATE_FUTEX
+# define __lll_private_flag(fl, private) \
+ ((fl) | FUTEX_PRIVATE_FLAG)
+# else
+# define __lll_private_flag(fl, private) \
+ ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
+# endif
+#else
+/* In libpthread the caller-supplied 'private' value decides; note it
+   is encoded reversed (LLL_SHARED == FUTEX_PRIVATE_FLAG), hence the
+   XOR before combining with the futex op 'fl'.  */
+# ifdef __ASSUME_PRIVATE_FUTEX
+# define __lll_private_flag(fl, private) \
+ (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
+# else
+/* Without kernel guarantee, mask with the per-thread private_futex
+   field in the TCB (read through %fs) at run time.  */
+# define __lll_private_flag(fl, private) \
+ (__builtin_constant_p (private) \
+ ? ((private) == 0 \
+ ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex)) \
+ : (fl)) \
+ : ({ unsigned int __fl = ((private) ^ FUTEX_PRIVATE_FLAG); \
+ __asm__ ("andl %%fs:%P1, %0" : "+r" (__fl) \
+ : "i" (offsetof (struct pthread, header.private_futex))); \
+ __fl | (fl); }))
+# endif
+#endif
+
+/* Initializer for lock. */
+#define LLL_LOCK_INITIALIZER (0)
+#define LLL_LOCK_INITIALIZER_LOCKED (1)
+#define LLL_LOCK_INITIALIZER_WAITERS (2)
+
+/* Delay in spinlock loop. */
+#define BUSY_WAIT_NOP __asm__ ("rep; nop")
+
+
+/* Hand-written .eh_frame (CIE + FDE) fragments for the out-of-line
+   "_L_*" lock stubs emitted by the macros below, so the unwinder can
+   step through the leaq/subq/callq/addq/jmp snippet.  The local
+   labels 1b-6b refer to labels inside the stub.  */
+#define LLL_STUB_UNWIND_INFO_START \
+ ".section .eh_frame,\"a\",@progbits\n" \
+"7:\t" ".long 9f-8f # Length of Common Information Entry\n" \
+"8:\t" ".long 0x0 # CIE Identifier Tag\n\t" \
+ ".byte 0x1 # CIE Version\n\t" \
+ ".ascii \"zR\\0\" # CIE Augmentation\n\t" \
+ ".uleb128 0x1 # CIE Code Alignment Factor\n\t" \
+ ".sleb128 -8 # CIE Data Alignment Factor\n\t" \
+ ".byte 0x10 # CIE RA Column\n\t" \
+ ".uleb128 0x1 # Augmentation size\n\t" \
+ ".byte 0x1b # FDE Encoding (pcrel sdata4)\n\t" \
+ ".byte 0x12 # DW_CFA_def_cfa_sf\n\t" \
+ ".uleb128 0x7\n\t" \
+ ".sleb128 16\n\t" \
+ ".align 8\n" \
+"9:\t" ".long 23f-10f # FDE Length\n" \
+"10:\t" ".long 10b-7b # FDE CIE offset\n\t" \
+ ".long 1b-. # FDE initial location\n\t" \
+ ".long 6b-1b # FDE address range\n\t" \
+ ".uleb128 0x0 # Augmentation size\n\t" \
+ ".byte 0x16 # DW_CFA_val_expression\n\t" \
+ ".uleb128 0x10\n\t" \
+ ".uleb128 12f-11f\n" \
+"11:\t" ".byte 0x80 # DW_OP_breg16\n\t" \
+ ".sleb128 4b-1b\n"
+#define LLL_STUB_UNWIND_INFO_END \
+ ".byte 0x16 # DW_CFA_val_expression\n\t" \
+ ".uleb128 0x10\n\t" \
+ ".uleb128 14f-13f\n" \
+"13:\t" ".byte 0x80 # DW_OP_breg16\n\t" \
+ ".sleb128 4b-2b\n" \
+"14:\t" ".byte 0x40 + (3b-2b) # DW_CFA_advance_loc\n\t" \
+ ".byte 0x0e # DW_CFA_def_cfa_offset\n\t" \
+ ".uleb128 0\n\t" \
+ ".byte 0x16 # DW_CFA_val_expression\n\t" \
+ ".uleb128 0x10\n\t" \
+ ".uleb128 16f-15f\n" \
+"15:\t" ".byte 0x80 # DW_OP_breg16\n\t" \
+ ".sleb128 4b-3b\n" \
+"16:\t" ".byte 0x40 + (4b-3b-1) # DW_CFA_advance_loc\n\t" \
+ ".byte 0x0e # DW_CFA_def_cfa_offset\n\t" \
+ ".uleb128 128\n\t" \
+ ".byte 0x16 # DW_CFA_val_expression\n\t" \
+ ".uleb128 0x10\n\t" \
+ ".uleb128 20f-17f\n" \
+"17:\t" ".byte 0x80 # DW_OP_breg16\n\t" \
+ ".sleb128 19f-18f\n\t" \
+ ".byte 0x0d # DW_OP_const4s\n" \
+"18:\t" ".4byte 4b-.\n\t" \
+ ".byte 0x1c # DW_OP_minus\n\t" \
+ ".byte 0x0d # DW_OP_const4s\n" \
+"19:\t" ".4byte 24f-.\n\t" \
+ ".byte 0x22 # DW_OP_plus\n" \
+"20:\t" ".byte 0x40 + (5b-4b+1) # DW_CFA_advance_loc\n\t" \
+ ".byte 0x13 # DW_CFA_def_cfa_offset_sf\n\t" \
+ ".sleb128 16\n\t" \
+ ".byte 0x16 # DW_CFA_val_expression\n\t" \
+ ".uleb128 0x10\n\t" \
+ ".uleb128 22f-21f\n" \
+"21:\t" ".byte 0x80 # DW_OP_breg16\n\t" \
+ ".sleb128 4b-5b\n" \
+"22:\t" ".align 8\n" \
+"23:\t" ".previous\n"
+
+/* Unwind info for
+ 1: leaq ..., %rdi
+ 2: subq $128, %rsp
+ 3: callq ...
+ 4: addq $128, %rsp
+ 5: jmp 24f
+ 6:
+ snippet. */
+#define LLL_STUB_UNWIND_INFO_5 \
+LLL_STUB_UNWIND_INFO_START \
+"12:\t" ".byte 0x40 + (2b-1b) # DW_CFA_advance_loc\n\t" \
+LLL_STUB_UNWIND_INFO_END
+
+/* Unwind info for
+ 1: leaq ..., %rdi
+ 0: movq ..., %rdx
+ 2: subq $128, %rsp
+ 3: callq ...
+ 4: addq $128, %rsp
+ 5: jmp 24f
+ 6:
+ snippet. */
+#define LLL_STUB_UNWIND_INFO_6 \
+LLL_STUB_UNWIND_INFO_START \
+"12:\t" ".byte 0x40 + (0b-1b) # DW_CFA_advance_loc\n\t" \
+ ".byte 0x16 # DW_CFA_val_expression\n\t" \
+ ".uleb128 0x10\n\t" \
+ ".uleb128 26f-25f\n" \
+"25:\t" ".byte 0x80 # DW_OP_breg16\n\t" \
+ ".sleb128 4b-0b\n" \
+"26:\t" ".byte 0x40 + (2b-0b) # DW_CFA_advance_loc\n\t" \
+LLL_STUB_UNWIND_INFO_END
+
+
+/* Block on *futex while it still contains VAL; no timeout.  */
+#define lll_futex_wait(futex, val, private) \
+ lll_futex_timed_wait(futex, val, NULL, private)
+
+
+/* FUTEX_WAIT syscall: sleep while *futex == val, at most until the
+   relative TIMEOUT (NULL = forever).  Returns the raw (negative
+   errno) syscall result.  The syscall instruction clobbers %rcx and
+   %r11, hence the clobber list.  */
+#define lll_futex_timed_wait(futex, val, timeout, private) \
+ ({ \
+ register const struct timespec *__to __asm__ ("r10") = timeout; \
+ int __status; \
+ register __typeof (val) _val __asm__ ("edx") = (val); \
+ __asm__ __volatile ("syscall" \
+ : "=a" (__status) \
+ : "0" (SYS_futex), "D" (futex), \
+ "S" (__lll_private_flag (FUTEX_WAIT, private)), \
+ "d" (_val), "r" (__to) \
+ : "memory", "cc", "r11", "cx"); \
+ __status; \
+ })
+
+
+/* FUTEX_WAKE syscall: wake up to NR threads waiting on *futex.
+   The result is deliberately ignored.  */
+#define lll_futex_wake(futex, nr, private) \
+ do { \
+ int __ignore; \
+ register __typeof (nr) _nr __asm__ ("edx") = (nr); \
+ __asm__ __volatile ("syscall" \
+ : "=a" (__ignore) \
+ : "0" (SYS_futex), "D" (futex), \
+ "S" (__lll_private_flag (FUTEX_WAKE, private)), \
+ "d" (_nr) \
+ : "memory", "cc", "r10", "r11", "cx"); \
+ } while (0)
+
+
+/* NB: in the lll_trylock macro we simply return the value in %eax
+   after the cmpxchg instruction.  In case the operation succeeded this
+   value is zero.  In case the operation failed, the cmpxchg instruction
+   has loaded the current value of the memory word which is guaranteed
+   to be nonzero.  */
+#if defined NOT_IN_libc || defined UP
+# define __lll_trylock_asm LOCK_INSTR "cmpxchgl %2, %1"
+#else
+/* Inside libc on SMP: skip the expensive lock prefix while the
+   process is still single-threaded (__libc_multiple_threads == 0).  */
+# define __lll_trylock_asm "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
+ "je 0f\n\t" \
+ "lock; cmpxchgl %2, %1\n\t" \
+ "jmp 1f\n\t" \
+ "0:\tcmpxchgl %2, %1\n\t" \
+ "1:"
+#endif
+
+/* Try to move FUTEX from 0 (unlocked) to 1 (locked); returns 0 on
+   success, nonzero (the current futex value) otherwise.  */
+#define lll_trylock(futex) \
+ ({ int ret; \
+ __asm__ __volatile (__lll_trylock_asm \
+ : "=a" (ret), "=m" (futex) \
+ : "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex), \
+ "0" (LLL_LOCK_INITIALIZER) \
+ : "memory"); \
+ ret; })
+
+/* Robust-mutex trylock: store the owner's ID instead of 1.  */
+#define lll_robust_trylock(futex, id) \
+ ({ int ret; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
+ : "=a" (ret), "=m" (futex) \
+ : "r" (id), "m" (futex), "0" (LLL_LOCK_INITIALIZER) \
+ : "memory"); \
+ ret; })
+
+/* Trylock that acquires directly in state 2 ("locked, possibly with
+   waiters"), as needed by the condvar implementation.  */
+#define lll_cond_trylock(futex) \
+ ({ int ret; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
+ : "=a" (ret), "=m" (futex) \
+ : "r" (LLL_LOCK_INITIALIZER_WAITERS), \
+ "m" (futex), "0" (LLL_LOCK_INITIALIZER) \
+ : "memory"); \
+ ret; })
+
+#if defined NOT_IN_libc || defined UP
+# define __lll_lock_asm_start LOCK_INSTR "cmpxchgl %4, %2\n\t" \
+ "jnz 1f\n\t"
+#else
+/* Inside libc on SMP: use the unlocked cmpxchg while the process is
+   still single-threaded.  */
+# define __lll_lock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
+ "je 0f\n\t" \
+ "lock; cmpxchgl %4, %2\n\t" \
+ "jnz 1f\n\t" \
+ "jmp 24f\n" \
+ "0:\tcmpxchgl %4, %2\n\t" \
+ "jnz 1f\n\t"
+#endif
+
+/* Acquire FUTEX.  Fast path is an inline cmpxchg 0 -> 1; on
+   contention control falls through to an out-of-line "_L_lock_%="
+   stub that calls __lll_lock_wait(_private) with the stack adjusted
+   past the 128-byte red zone.  */
+#define lll_lock(futex, private) \
+ (void) \
+ ({ int ignore1, ignore2, ignore3; \
+ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
+ __asm__ __volatile (__lll_lock_asm_start \
+ ".subsection 1\n\t" \
+ ".type _L_lock_%=, @function\n" \
+ "_L_lock_%=:\n" \
+ "1:\tleaq %2, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_lock_wait_private\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_lock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=S" (ignore1), "=&D" (ignore2), "=m" (futex), \
+ "=a" (ignore3) \
+ : "0" (1), "m" (futex), "3" (0) \
+ : "cx", "r11", "cc", "memory"); \
+ else \
+ __asm__ __volatile (__lll_lock_asm_start \
+ ".subsection 1\n\t" \
+ ".type _L_lock_%=, @function\n" \
+ "_L_lock_%=:\n" \
+ "1:\tleaq %2, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_lock_wait\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_lock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
+ "=a" (ignore3) \
+ : "1" (1), "m" (futex), "3" (0), "0" (private) \
+ : "cx", "r11", "cc", "memory"); \
+ }) \
+
+/* Robust-mutex lock: cmpxchg 0 -> ID, contention handled by
+   __lll_robust_lock_wait.  Evaluates to 0 on success.  */
+#define lll_robust_lock(futex, id, private) \
+ ({ int result, ignore1, ignore2; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
+ "jnz 1f\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_robust_lock_%=, @function\n" \
+ "_L_robust_lock_%=:\n" \
+ "1:\tleaq %2, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_robust_lock_wait\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_robust_lock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
+ "=a" (result) \
+ : "1" (id), "m" (futex), "3" (0), "0" (private) \
+ : "cx", "r11", "cc", "memory"); \
+ result; })
+
+/* Like lll_lock but acquires directly in state 2, signalling that
+   waiters may exist (used by the condvar code).  */
+#define lll_cond_lock(futex, private) \
+ (void) \
+ ({ int ignore1, ignore2, ignore3; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
+ "jnz 1f\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_cond_lock_%=, @function\n" \
+ "_L_cond_lock_%=:\n" \
+ "1:\tleaq %2, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_lock_wait\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_cond_lock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
+ "=a" (ignore3) \
+ : "1" (2), "m" (futex), "3" (0), "0" (private) \
+ : "cx", "r11", "cc", "memory"); \
+ })
+
+/* Robust variant of lll_cond_lock: acquires with ID|FUTEX_WAITERS.  */
+#define lll_robust_cond_lock(futex, id, private) \
+ ({ int result, ignore1, ignore2; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
+ "jnz 1f\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_robust_cond_lock_%=, @function\n" \
+ "_L_robust_cond_lock_%=:\n" \
+ "1:\tleaq %2, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_robust_lock_wait\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_robust_cond_lock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
+ "=a" (result) \
+ : "1" (id | FUTEX_WAITERS), "m" (futex), "3" (0), \
+ "0" (private) \
+ : "cx", "r11", "cc", "memory"); \
+ result; })
+
+/* Acquire FUTEX with an absolute TIMEOUT.  On contention the
+   out-of-line stub loads the timeout into %rdx and calls
+   __lll_timedlock_wait.  Evaluates to 0 on success, or an errno
+   value (e.g. ETIMEDOUT) on failure.  */
+#define lll_timedlock(futex, timeout, private) \
+ ({ int result, ignore1, ignore2, ignore3; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t" \
+ "jnz 1f\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_timedlock_%=, @function\n" \
+ "_L_timedlock_%=:\n" \
+ "1:\tleaq %4, %%rdi\n" \
+ "0:\tmovq %8, %%rdx\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_timedlock_wait\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_timedlock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_6 \
+ "24:" \
+ : "=a" (result), "=D" (ignore1), "=S" (ignore2), \
+ "=&d" (ignore3), "=m" (futex) \
+ : "0" (0), "1" (1), "m" (futex), "m" (timeout), \
+ "2" (private) \
+ : "memory", "cx", "cc", "r10", "r11"); \
+ result; })
+
+/* Robust-mutex variant of lll_timedlock; stores ID and calls
+   __lll_robust_timedlock_wait on contention.  */
+#define lll_robust_timedlock(futex, timeout, id, private) \
+ ({ int result, ignore1, ignore2, ignore3; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t" \
+ "jnz 1f\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_robust_timedlock_%=, @function\n" \
+ "_L_robust_timedlock_%=:\n" \
+ "1:\tleaq %4, %%rdi\n" \
+ "0:\tmovq %8, %%rdx\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_robust_timedlock_wait\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_robust_timedlock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_6 \
+ "24:" \
+ : "=a" (result), "=D" (ignore1), "=S" (ignore2), \
+ "=&d" (ignore3), "=m" (futex) \
+ : "0" (0), "1" (id), "m" (futex), "m" (timeout), \
+ "2" (private) \
+ : "memory", "cx", "cc", "r10", "r11"); \
+ result; })
+
+#if defined NOT_IN_libc || defined UP
+# define __lll_unlock_asm_start LOCK_INSTR "decl %0\n\t" \
+ "jne 1f\n\t"
+#else
+/* Inside libc on SMP: unlocked decrement while single-threaded.  */
+# define __lll_unlock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
+ "je 0f\n\t" \
+ "lock; decl %0\n\t" \
+ "jne 1f\n\t" \
+ "jmp 24f\n\t" \
+ "0:\tdecl %0\n\t" \
+ "jne 1f\n\t"
+#endif
+
+/* Release FUTEX.  Fast path decrements 1 -> 0; if the old value was
+   not 1 (waiters present) the out-of-line stub calls
+   __lll_unlock_wake(_private) to wake one of them.  */
+#define lll_unlock(futex, private) \
+ (void) \
+ ({ int ignore; \
+ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
+ __asm__ __volatile (__lll_unlock_asm_start \
+ ".subsection 1\n\t" \
+ ".type _L_unlock_%=, @function\n" \
+ "_L_unlock_%=:\n" \
+ "1:\tleaq %0, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_unlock_wake_private\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_unlock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=m" (futex), "=&D" (ignore) \
+ : "m" (futex) \
+ : "ax", "cx", "r11", "cc", "memory"); \
+ else \
+ __asm__ __volatile (__lll_unlock_asm_start \
+ ".subsection 1\n\t" \
+ ".type _L_unlock_%=, @function\n" \
+ "_L_unlock_%=:\n" \
+ "1:\tleaq %0, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_unlock_wake\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_unlock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=m" (futex), "=&D" (ignore) \
+ : "m" (futex), "S" (private) \
+ : "ax", "cx", "r11", "cc", "memory"); \
+ })
+
+/* Release a robust mutex: clear everything but FUTEX_WAITERS with an
+   atomic AND; if waiters remain, wake one via __lll_unlock_wake.  */
+#define lll_robust_unlock(futex, private) \
+ do \
+ { \
+ int ignore; \
+ __asm__ __volatile (LOCK_INSTR "andl %2, %0\n\t" \
+ "jne 1f\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_robust_unlock_%=, @function\n" \
+ "_L_robust_unlock_%=:\n" \
+ "1:\tleaq %0, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_unlock_wake\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_robust_unlock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=m" (futex), "=&D" (ignore) \
+ : "i" (FUTEX_WAITERS), "m" (futex), \
+ "S" (private) \
+ : "ax", "cx", "r11", "cc", "memory"); \
+ } \
+ while (0)
+
+/* Mark a robust mutex's owner as dead: set FUTEX_OWNER_DIED in the
+   futex word atomically, then issue a FUTEX_WAKE for one waiter.  */
+#define lll_robust_dead(futex, private) \
+ do \
+ { \
+ int ignore; \
+ __asm__ __volatile (LOCK_INSTR "orl %3, (%2)\n\t" \
+ "syscall" \
+ : "=m" (futex), "=a" (ignore) \
+ : "D" (&(futex)), "i" (FUTEX_OWNER_DIED), \
+ "S" (__lll_private_flag (FUTEX_WAKE, private)), \
+ "1" (__NR_futex), "d" (1) \
+ : "cx", "r11", "cc", "memory"); \
+ } \
+ while (0)
+
+/* Returns non-zero if error happened, zero if success. */
+/* FUTEX_CMP_REQUEUE: wake NR_WAKE waiters on FTX and requeue up to
+   NR_MOVE of the rest onto MUTEX, provided *FTX still equals VAL.  */
+#define lll_futex_requeue(ftx, nr_wake, nr_move, mutex, val, private) \
+ ({ int __res; \
+ register int __nr_move __asm__ ("r10") = nr_move; \
+ register void *__mutex __asm__ ("r8") = mutex; \
+ register int __val __asm__ ("r9") = val; \
+ __asm__ __volatile ("syscall" \
+ : "=a" (__res) \
+ : "0" (__NR_futex), "D" ((void *) ftx), \
+ "S" (__lll_private_flag (FUTEX_CMP_REQUEUE, \
+ private)), "d" (nr_wake), \
+ "r" (__nr_move), "r" (__mutex), "r" (__val) \
+ : "cx", "r11", "cc", "memory"); \
+ __res < 0; })
+
+/* Nonzero iff FUTEX is not in the unlocked state.  */
+#define lll_islocked(futex) \
+ (futex != LLL_LOCK_INITIALIZER)
+
+
+/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via
+   futex wakeup when the clone terminates.  The memory location contains
+   the thread ID while the clone is running and is reset to zero
+   afterwards.
+
+   The macro parameter must not have any side effect.  */
+/* Loop on FUTEX_WAIT until TID is cleared by the kernel.  Always a
+   shared (non-private) futex -- the kernel performs the
+   CLONE_CHILD_CLEARTID wakeup on the global futex.  */
+#define lll_wait_tid(tid) \
+ do { \
+ int __ignore; \
+ register __typeof (tid) _tid __asm__ ("edx") = (tid); \
+ if (_tid != 0) \
+ __asm__ __volatile ("xorq %%r10, %%r10\n\t" \
+ "1:\tmovq %2, %%rax\n\t" \
+ "syscall\n\t" \
+ "cmpl $0, (%%rdi)\n\t" \
+ "jne 1b" \
+ : "=&a" (__ignore) \
+ : "S" (FUTEX_WAIT), "i" (SYS_futex), "D" (&tid), \
+ "d" (_tid) \
+ : "memory", "cc", "r10", "r11", "cx"); \
+ } while (0)
+
+extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
+ attribute_hidden;
+/* Like lll_wait_tid but gives up at the absolute time ABSTIME.
+   Evaluates to 0 on success, EINVAL for a malformed timespec, or
+   ETIMEDOUT.  */
+#define lll_timedwait_tid(tid, abstime) \
+ ({ \
+ int __result = 0; \
+ if (tid != 0) \
+ { \
+ if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) \
+ __result = EINVAL; \
+ else \
+ __result = __lll_timedwait_tid (&tid, abstime); \
+ } \
+ __result; })
+
+#endif /* !__ASSEMBLER__ */
+
+#endif /* lowlevellock.h */
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S
new file mode 100644
index 000000000..2eb8e29fa
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S
@@ -0,0 +1,306 @@
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2009
+ Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <pthread-errnos.h>
+#include <lowlevellock.h>
+#include <lowlevelrobustlock.h>
+#include <bits/kernel-features.h>
+
+ .text
+
+/* Robust-mutex futex word flag bits (see the robust futex ABI).  */
+#define FUTEX_WAITERS 0x80000000
+#define FUTEX_OWNER_DIED 0x40000000
+
+/* Turn the caller-supplied private flag in REG into the final futex
+   operation word.  The flag is stored reversed (see lowlevellock.h),
+   hence the XOR; without __ASSUME_PRIVATE_FUTEX the per-thread
+   PRIVATE_FUTEX TCB field is consulted at run time.  */
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+ xorl $(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME), reg
+#else
+# if FUTEX_WAIT == 0
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg
+# else
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT, reg
+# endif
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, reg
+#endif
+
+/* For the calculation see asm/vsyscall.h. */
+#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
+
+
+ /* Wait for the robust futex at %rdi (current value in %eax, flags
+    in %esi).  Sets FUTEX_WAITERS before sleeping and claims the lock
+    with TID|FUTEX_WAITERS when it becomes free.  Returns 0 in %eax
+    on success; if FUTEX_OWNER_DIED is set, returns the futex value
+    with that bit for the caller to handle.  */
+ .globl __lll_robust_lock_wait
+ .type __lll_robust_lock_wait,@function
+ .hidden __lll_robust_lock_wait
+ .align 16
+__lll_robust_lock_wait:
+ cfi_startproc
+ pushq %r10
+ cfi_adjust_cfa_offset(8)
+ pushq %rdx
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%r10, -16)
+ cfi_offset(%rdx, -24)
+
+ xorq %r10, %r10 /* No timeout. */
+ LOAD_FUTEX_WAIT (%esi)
+
+4: movl %eax, %edx
+ orl $FUTEX_WAITERS, %edx
+
+ testl $FUTEX_OWNER_DIED, %eax
+ jnz 3f
+
+ /* Skip the cmpxchg if the waiters bit is already set.  */
+ cmpl %edx, %eax
+ je 1f
+
+ LOCK
+ cmpxchgl %edx, (%rdi)
+ jnz 2f
+
+1: movl $SYS_futex, %eax
+ syscall
+
+ movl (%rdi), %eax
+
+2: testl %eax, %eax
+ jne 4b
+
+ /* Futex is free: try to take it with our TID plus waiters bit.  */
+ movl %fs:TID, %edx
+ orl $FUTEX_WAITERS, %edx
+ LOCK
+ cmpxchgl %edx, (%rdi)
+ jnz 4b
+ /* NB: %rax == 0 */
+
+3: popq %rdx
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%rdx)
+ popq %r10
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r10)
+ retq
+ cfi_endproc
+ .size __lll_robust_lock_wait,.-__lll_robust_lock_wait
+
+
+ /* Like __lll_robust_lock_wait but gives up at the absolute timeout
+    *%rdx.  Returns 0, ETIMEDOUT, EINVAL, or the futex value with
+    FUTEX_OWNER_DIED set.  */
+ .globl __lll_robust_timedlock_wait
+ .type __lll_robust_timedlock_wait,@function
+ .hidden __lll_robust_timedlock_wait
+__lll_robust_timedlock_wait:
+ cfi_startproc
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+ /* Without FUTEX_CLOCK_REALTIME support fall back to the
+    relative-timeout emulation below.  */
+# ifdef __PIC__
+ cmpl $0, __have_futex_clock_realtime(%rip)
+# else
+ cmpl $0, __have_futex_clock_realtime
+# endif
+ je .Lreltmo
+# endif
+
+ /* Fast path: FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME takes the
+    absolute timeout directly; %r9 = FUTEX_BITSET_MATCH_ANY.  */
+ pushq %r9
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r9, 0)
+ movq %rdx, %r10
+ movl $0xffffffff, %r9d
+ LOAD_FUTEX_WAIT_ABS (%esi)
+
+1: testl $FUTEX_OWNER_DIED, %eax
+ jnz 3f
+
+ movl %eax, %edx
+ orl $FUTEX_WAITERS, %edx
+
+ cmpl %eax, %edx
+ je 5f
+
+ LOCK
+ cmpxchgl %edx, (%rdi)
+ movq $0, %rcx /* Must use mov to avoid changing cc. */
+ jnz 6f
+
+5: movl $SYS_futex, %eax
+ syscall
+ movl %eax, %ecx
+
+ movl (%rdi), %eax
+
+6: testl %eax, %eax
+ jne 2f
+
+ /* Futex is free: claim it with TID|FUTEX_WAITERS.  */
+ movl %fs:TID, %edx
+ orl $FUTEX_WAITERS, %edx
+ LOCK
+ cmpxchgl %edx, (%rdi)
+ jnz 2f
+
+3: popq %r9
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r9)
+ retq
+
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r9, 0)
+ /* Check whether the time expired. */
+2: cmpl $-ETIMEDOUT, %ecx
+ je 4f
+ cmpl $-EINVAL, %ecx
+ jne 1b
+
+4: movl %ecx, %eax
+ negl %eax
+ jmp 3b
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r9)
+
+
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+.Lreltmo:
+ /* Check for a valid timeout value. */
+ cmpq $1000000000, 8(%rdx)
+ jae 3f
+
+ pushq %r8
+ cfi_adjust_cfa_offset(8)
+ pushq %r9
+ cfi_adjust_cfa_offset(8)
+ pushq %r12
+ cfi_adjust_cfa_offset(8)
+ pushq %r13
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%r8, -16)
+ cfi_offset(%r9, -24)
+ cfi_offset(%r12, -32)
+ cfi_offset(%r13, -40)
+ /* Save the flags; reloaded as 32(%rsp) before each futex call.  */
+ pushq %rsi
+ cfi_adjust_cfa_offset(8)
+
+ /* Stack frame for the timespec and timeval structs. */
+ subq $32, %rsp
+ cfi_adjust_cfa_offset(32)
+
+ movq %rdi, %r12
+ movq %rdx, %r13
+
+ /* Preserve the current futex value across the vsyscall.  */
+1: movq %rax, 16(%rsp)
+
+ /* Get current time. */
+ movq %rsp, %rdi
+ xorl %esi, %esi
+ movq $VSYSCALL_ADDR_vgettimeofday, %rax
+ /* This is a regular function call, all caller-save registers
+ might be clobbered. */
+ callq *%rax
+
+ /* Compute relative timeout. */
+ movq 8(%rsp), %rax
+ movl $1000, %edi
+ mul %rdi /* Milli seconds to nano seconds. */
+ movq (%r13), %rdi
+ movq 8(%r13), %rsi
+ subq (%rsp), %rdi
+ subq %rax, %rsi
+ jns 4f
+ addq $1000000000, %rsi
+ decq %rdi
+4: testq %rdi, %rdi
+ js 8f /* Time is already up. */
+
+ /* Futex call. */
+ movq %rdi, (%rsp) /* Store relative timeout. */
+ movq %rsi, 8(%rsp)
+
+ movq 16(%rsp), %rdx
+ movl %edx, %eax
+ orl $FUTEX_WAITERS, %edx
+
+ testl $FUTEX_OWNER_DIED, %eax
+ jnz 6f
+
+ cmpl %eax, %edx
+ je 2f
+
+ LOCK
+ cmpxchgl %edx, (%r12)
+ movq $0, %rcx /* Must use mov to avoid changing cc. */
+ jnz 5f
+
+2: movq %rsp, %r10
+ movl 32(%rsp), %esi
+ LOAD_FUTEX_WAIT (%esi)
+ movq %r12, %rdi
+ movl $SYS_futex, %eax
+ syscall
+ movq %rax, %rcx
+
+ movl (%r12), %eax
+
+5: testl %eax, %eax
+ jne 7f
+
+ movl %fs:TID, %edx
+ orl $FUTEX_WAITERS, %edx
+ LOCK
+ cmpxchgl %edx, (%r12)
+ jnz 7f
+
+6: addq $40, %rsp
+ cfi_adjust_cfa_offset(-40)
+ popq %r13
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r13)
+ popq %r12
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r12)
+ popq %r9
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r9)
+ popq %r8
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r8)
+ retq
+
+ /* Invalid timeout (tv_nsec out of range).  */
+3: movl $EINVAL, %eax
+ retq
+
+ /* Re-establish the CFA for the out-of-line error path.  */
+ cfi_adjust_cfa_offset(72)
+ cfi_offset(%r8, -16)
+ cfi_offset(%r9, -24)
+ cfi_offset(%r12, -32)
+ cfi_offset(%r13, -40)
+ /* Check whether the time expired. */
+7: cmpl $-ETIMEDOUT, %ecx
+ jne 1b
+
+8: movl $ETIMEDOUT, %eax
+ jmp 6b
+#endif
+ cfi_endproc
+ .size __lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pt-__syscall_error.c b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pt-__syscall_error.c
new file mode 100644
index 000000000..2ab81490c
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pt-__syscall_error.c
@@ -0,0 +1 @@
+#include <../../../../../../../libc/sysdeps/linux/x86_64/__syscall_error.c>
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pt-vfork.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pt-vfork.S
new file mode 100644
index 000000000..df4949615
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pt-vfork.S
@@ -0,0 +1,33 @@
+/* Copyright (C) 2004 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <tcb-offsets.h>
+
+/* Hooks for the shared vfork.S: before the syscall, stash the TCB
+   PID field in %esi and store its negation back into the TCB;
+   afterwards restore it if the return value is nonzero (parent, or
+   failure).  NOTE(review): presumably this makes the vfork child's
+   cached PID distinguishable until it execs -- confirm against the
+   NPTL getpid handling.  */
+#define SAVE_PID \
+ movl %fs:PID, %esi; \
+ movl %esi, %edx; \
+ negl %edx; \
+ movl %edx, %fs:PID
+
+#define RESTORE_PID \
+ testq %rax, %rax; \
+ je 1f; \
+ movl %esi, %fs:PID; \
+1:
+
+#include <../../../../../../../libc/sysdeps/linux/x86_64/vfork.S>
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S
new file mode 100644
index 000000000..15ad534fa
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S
@@ -0,0 +1,161 @@
+/* Copyright (C) 2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelbarrier.h>
+
+
+ .text
+
+/* int pthread_barrier_wait (pthread_barrier_t *barrier)
+
+   %rdi: barrier.  Field offsets (MUTEX, LEFT, INIT_COUNT, CURR_EVENT,
+   PRIVATE) come from lowlevelbarrier.h.  Protocol: take the internal
+   MUTEX lock and decrement LEFT; the thread that brings LEFT to zero
+   bumps CURR_EVENT and futex-wakes everyone, while the other threads
+   futex-wait until CURR_EVENT changes.  Every waiter then increments
+   LEFT again, and whichever thread restores it to INIT_COUNT releases
+   the MUTEX.  The serial thread returns -1
+   (== PTHREAD_BARRIER_SERIAL_THREAD), all others return 0.  */
+	.globl pthread_barrier_wait
+	.type pthread_barrier_wait,@function
+	.align 16
+pthread_barrier_wait:
+	/* Get the mutex: fast-path 0 -> 1 via CMPXCHG; on contention
+	   fall out to __lll_lock_wait at 1:.  */
+	xorl %eax, %eax
+	movl $1, %esi
+	LOCK
+	cmpxchgl %esi, MUTEX(%rdi)
+	jnz 1f
+
+	/* One less waiter.  If this was the last one needed wake
+	   everybody. */
+2:	decl LEFT(%rdi)
+	je 3f
+
+	/* There are more threads to come.  Snapshot the current event
+	   counter before releasing the mutex; the futex wait below is
+	   keyed on this value.  */
+#if CURR_EVENT == 0
+	movl (%rdi), %edx
+#else
+	movl CURR_EVENT(%rdi), %edx
+#endif
+
+	/* Release the mutex. */
+	LOCK
+	decl MUTEX(%rdi)
+	jne 6f
+
+	/* Wait for the remaining threads.  The call will return immediately
+	   if the CURR_EVENT memory has meanwhile been changed.  PRIVATE
+	   holds the per-barrier futex private/shared flag.  */
+7:
+#if FUTEX_WAIT == 0
+	movl PRIVATE(%rdi), %esi
+#else
+	movl $FUTEX_WAIT, %esi
+	orl PRIVATE(%rdi), %esi
+#endif
+	xorq %r10, %r10		/* No timeout.  */
+8:	movl $SYS_futex, %eax
+	syscall
+
+	/* Don't return on spurious wakeups.  The syscall does not change
+	   any register except %eax so there is no need to reload any of
+	   them. */
+#if CURR_EVENT == 0
+	cmpl %edx, (%rdi)
+#else
+	cmpl %edx, CURR_EVENT(%rdi)
+#endif
+	je 8b
+
+	/* Increment LEFT.  If this brings the count back to the
+	   initial count unlock the object.  (",pt" below is the gas
+	   static predict-taken branch hint suffix.)  */
+	movl $1, %edx
+	movl INIT_COUNT(%rdi), %eax
+	LOCK
+	xaddl %edx, LEFT(%rdi)
+	subl $1, %eax
+	cmpl %eax, %edx
+	jne,pt 10f
+
+	/* Release the mutex.  We cannot release the lock before
+	   waking the waiting threads since otherwise a new thread might
+	   arrive and gets waken up, too. */
+	LOCK
+	decl MUTEX(%rdi)
+	jne 9f
+
+10:	xorl %eax, %eax		/* != PTHREAD_BARRIER_SERIAL_THREAD */
+
+	retq
+
+	/* The necessary number of threads arrived: publish a new event
+	   so current waiters stop waiting.  */
+3:
+#if CURR_EVENT == 0
+	incl (%rdi)
+#else
+	incl CURR_EVENT(%rdi)
+#endif
+
+	/* Wake up all waiters.  The count is a signed number in the kernel
+	   so 0x7fffffff is the highest value. */
+	movl $0x7fffffff, %edx
+	movl $FUTEX_WAKE, %esi
+	orl PRIVATE(%rdi), %esi
+	movl $SYS_futex, %eax
+	syscall
+
+	/* Increment LEFT.  If this brings the count back to the
+	   initial count unlock the object. */
+	movl $1, %edx
+	movl INIT_COUNT(%rdi), %eax
+	LOCK
+	xaddl %edx, LEFT(%rdi)
+	subl $1, %eax
+	cmpl %eax, %edx
+	jne,pt 5f
+
+	/* Release the mutex.  We cannot release the lock before
+	   waking the waiting threads since otherwise a new thread might
+	   arrive and gets waken up, too. */
+	LOCK
+	decl MUTEX(%rdi)
+	jne 4f
+
+5:	orl $-1, %eax		/* == PTHREAD_BARRIER_SERIAL_THREAD */
+
+	retq
+
+	/* Slow paths.  In each stub %rdi is advanced to the embedded
+	   MUTEX word and %esi is derived from PRIVATE; NOTE(review):
+	   the XOR with LLL_SHARED presumably converts the stored futex
+	   private-flag encoding into the lll_* private/shared encoding
+	   -- confirm against lowlevelbarrier.h/lowlevellock.h.  */
+1:	movl PRIVATE(%rdi), %esi
+	addq $MUTEX, %rdi
+	xorl $LLL_SHARED, %esi
+	callq __lll_lock_wait
+	subq $MUTEX, %rdi
+	jmp 2b
+
+4:	movl PRIVATE(%rdi), %esi
+	addq $MUTEX, %rdi
+	xorl $LLL_SHARED, %esi
+	callq __lll_unlock_wake
+	jmp 5b
+
+6:	movl PRIVATE(%rdi), %esi
+	addq $MUTEX, %rdi
+	xorl $LLL_SHARED, %esi
+	callq __lll_unlock_wake
+	subq $MUTEX, %rdi
+	jmp 7b
+
+9:	movl PRIVATE(%rdi), %esi
+	addq $MUTEX, %rdi
+	xorl $LLL_SHARED, %esi
+	callq __lll_unlock_wake
+	jmp 10b
+	.size pthread_barrier_wait,.-pthread_barrier_wait
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S
new file mode 100644
index 000000000..0f8037ba2
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S
@@ -0,0 +1,177 @@
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2009
+ Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelcond.h>
+#include <bits/kernel-features.h>
+#include <pthread-pi-defines.h>
+#include <pthread-errnos.h>
+
+
+ .text
+
+	/* int pthread_cond_broadcast (pthread_cond_t *cond)
+
+	   %rdi: cond.  Wakes all current waiters.  Under the condvar's
+	   internal lock, advance wakeup_seq and woken_seq to total_seq,
+	   publish 2*total_seq (low 32 bits) as the futex value, and bump
+	   broadcast_seq so sleeping waiters can recognize the broadcast.
+	   Then wake one waiter and requeue the rest directly onto the
+	   dep_mutex futex (FUTEX_CMP_REQUEUE, or FUTEX_CMP_REQUEUE_PI
+	   for PI mutexes).  A plain FUTEX_WAKE of everybody is the
+	   fallback for pshared condvars (dep_mutex == -1), old kernels,
+	   and any futex error.  Always returns 0.  */
+	.globl __pthread_cond_broadcast
+	.type __pthread_cond_broadcast, @function
+	.align 16
+__pthread_cond_broadcast:
+
+	/* Get internal lock. */
+	movl $1, %esi
+	xorl %eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %esi, (%rdi)
+#else
+	cmpxchgl %esi, cond_lock(%rdi)
+#endif
+	jnz 1f
+
+	/* From here on %rdi points at cond_futex.  Nothing to do if
+	   there are no pending waiters (total_seq <= wakeup_seq).  */
+2:	addq $cond_futex, %rdi
+	movq total_seq-cond_futex(%rdi), %r9
+	cmpq wakeup_seq-cond_futex(%rdi), %r9
+	jna 4f
+
+	/* Cause all currently waiting threads to recognize they are
+	   woken up. */
+	movq %r9, wakeup_seq-cond_futex(%rdi)
+	movq %r9, woken_seq-cond_futex(%rdi)
+	addq %r9, %r9
+	movl %r9d, (%rdi)
+	incl broadcast_seq-cond_futex(%rdi)
+
+	/* Get the address of the mutex used. */
+	movq dep_mutex-cond_futex(%rdi), %r8
+
+	/* Unlock. */
+	LOCK
+	decl cond_lock-cond_futex(%rdi)
+	jne 7f
+
+8:	cmpq $-1, %r8
+	je 9f
+
+	/* Do not use requeue for pshared condvars. */
+	testl $PS_BIT, MUTEX_KIND(%r8)
+	jne 9f
+
+	/* Requeue to a PI mutex if the PI bit is set. */
+	movl MUTEX_KIND(%r8), %eax
+	andl $(ROBUST_BIT|PI_BIT), %eax
+	cmpl $PI_BIT, %eax
+	je 81f
+
+	/* Wake one thread (%edx = 1) and requeue up to 0x7fffffff
+	   others (%r10) onto the mutex futex (uaddr2 in %r8).  */
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl $(FUTEX_CMP_REQUEUE|FUTEX_PRIVATE_FLAG), %esi
+#else
+	movl %fs:PRIVATE_FUTEX, %esi
+	orl $FUTEX_CMP_REQUEUE, %esi
+#endif
+	movl $SYS_futex, %eax
+	movl $1, %edx
+	movl $0x7fffffff, %r10d
+	syscall
+
+	/* For any kind of error, which mainly is EAGAIN, we try again
+	   with WAKE.  The general test also covers running on old
+	   kernels. */
+	cmpq $-4095, %rax
+	jae 9f
+
+10:	xorl %eax, %eax
+	retq
+
+	/* Same requeue, but through the PI-aware futex operation. */
+81:	movl $(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
+	movl $SYS_futex, %eax
+	movl $1, %edx
+	movl $0x7fffffff, %r10d
+	syscall
+
+	/* For any kind of error, which mainly is EAGAIN, we try again
+	   with WAKE.  The general test also covers running on old
+	   kernels. */
+	cmpq $-4095, %rax
+	jb 10b
+	jmp 9f
+
+	.align 16
+	/* Unlock. */
+4:	LOCK
+	decl cond_lock-cond_futex(%rdi)
+	jne 5f
+
+6:	xorl %eax, %eax
+	retq
+
+	/* Initial locking failed.  Select the lll flavor: private
+	   unless the condvar is process-shared (dep_mutex == -1).  */
+1:
+#if cond_lock != 0
+	addq $cond_lock, %rdi
+#endif
+	cmpq $-1, dep_mutex-cond_lock(%rdi)
+	movl $LLL_PRIVATE, %eax
+	movl $LLL_SHARED, %esi
+	cmovne %eax, %esi
+	callq __lll_lock_wait
+#if cond_lock != 0
+	subq $cond_lock, %rdi
+#endif
+	jmp 2b
+
+	/* Unlock in loop requires wakeup. */
+5:	addq $cond_lock-cond_futex, %rdi
+	cmpq $-1, dep_mutex-cond_lock(%rdi)
+	movl $LLL_PRIVATE, %eax
+	movl $LLL_SHARED, %esi
+	cmovne %eax, %esi
+	callq __lll_unlock_wake
+	jmp 6b
+
+	/* Unlock in loop requires wakeup. */
+7:	addq $cond_lock-cond_futex, %rdi
+	cmpq $-1, %r8
+	movl $LLL_PRIVATE, %eax
+	movl $LLL_SHARED, %esi
+	cmovne %eax, %esi
+	callq __lll_unlock_wake
+	subq $cond_lock-cond_futex, %rdi
+	jmp 8b
+
+9:	/* The futex requeue functionality is not available (or not
+	   usable): wake every waiter with a plain FUTEX_WAKE, private
+	   iff dep_mutex != -1.  */
+	cmpq $-1, %r8
+	movl $0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl $FUTEX_WAKE, %eax
+	movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+	cmove %eax, %esi
+#else
+	movl $0, %eax
+	movl %fs:PRIVATE_FUTEX, %esi
+	cmove %eax, %esi
+	orl $FUTEX_WAKE, %esi
+#endif
+	movl $SYS_futex, %eax
+	syscall
+	jmp 10b
+	.size __pthread_cond_broadcast, .-__pthread_cond_broadcast
+weak_alias(__pthread_cond_broadcast, pthread_cond_broadcast)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S
new file mode 100644
index 000000000..568c98470
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S
@@ -0,0 +1,160 @@
+/* Copyright (C) 2002-2005, 2007, 2009 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelcond.h>
+#include <pthread-pi-defines.h>
+#include <bits/kernel-features.h>
+#include <pthread-errnos.h>
+
+
+ .text
+
+	/* int pthread_cond_signal (pthread_cond_t *cond)
+
+	   %rdi: cond (a copy is kept in %r8).  Wakes at most one
+	   waiter: under the internal lock, bump wakeup_seq and the
+	   futex word, then use FUTEX_WAKE_OP to wake one thread while
+	   the kernel atomically releases cond_lock
+	   (FUTEX_OP_CLEAR_WAKE_IF_GT_ONE).  PI mutexes go through
+	   FUTEX_CMP_REQUEUE_PI instead; on any futex error fall back
+	   to a plain FUTEX_WAKE followed by a userland unlock.
+	   Always returns 0.  */
+	.globl __pthread_cond_signal
+	.type __pthread_cond_signal, @function
+	.align 16
+__pthread_cond_signal:
+
+	/* Get internal lock. */
+	movq %rdi, %r8
+	movl $1, %esi
+	xorl %eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %esi, (%rdi)
+#else
+	cmpxchgl %esi, cond_lock(%rdi)
+#endif
+	jnz 1f
+
+	/* %rdi now points at cond_futex.  Nothing to do if there is no
+	   waiter to signal (total_seq <= wakeup_seq).  */
+2:	addq $cond_futex, %rdi
+	movq total_seq(%r8), %rcx
+	cmpq wakeup_seq(%r8), %rcx
+	jbe 4f
+
+	/* Bump the wakeup number. */
+	addq $1, wakeup_seq(%r8)
+	addl $1, (%rdi)
+
+	/* Wake up one thread. */
+	cmpq $-1, dep_mutex(%r8)
+	movl $FUTEX_WAKE_OP, %esi
+	movl $1, %edx
+	movl $SYS_futex, %eax
+	je 8f
+
+	/* Get the address of the mutex used. */
+	movq dep_mutex(%r8), %rcx
+	movl MUTEX_KIND(%rcx), %r11d
+	andl $(ROBUST_BIT|PI_BIT), %r11d
+	cmpl $PI_BIT, %r11d
+	je 9f
+
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl $(FUTEX_WAKE_OP|FUTEX_PRIVATE_FLAG), %esi
+#else
+	orl %fs:PRIVATE_FUTEX, %esi
+#endif
+
+	/* futex(cond_futex, WAKE_OP, 1, 1, cond_lock,
+	   FUTEX_OP_CLEAR_WAKE_IF_GT_ONE): wake one waiter and have the
+	   kernel clear the internal lock, waking a lock waiter too if
+	   the lock value was > 1.  */
+8:	movl $1, %r10d
+#if cond_lock != 0
+	addq $cond_lock, %r8
+#endif
+	movl $FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, %r9d
+	syscall
+#if cond_lock != 0
+	subq $cond_lock, %r8
+#endif
+	/* For any kind of error, we try again with WAKE.
+	   The general test also covers running on old kernels. */
+	cmpq $-4095, %rax
+	jae 7f
+
+	xorl %eax, %eax
+	retq
+
+	/* Wake up one thread and requeue none in the PI Mutex case. */
+9:	movl $(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
+	movq %rcx, %r8
+	xorq %r10, %r10
+	movl (%rdi), %r9d // XXX Can this be right?
+	syscall
+
+	leaq -cond_futex(%rdi), %r8
+
+	/* For any kind of error, we try again with WAKE.
+	   The general test also covers running on old kernels. */
+	cmpq $-4095, %rax
+	jb 4f
+
+	/* WAKE_OP/REQUEUE_PI failed: plain FUTEX_WAKE, then unlock in
+	   userland (fall through to 4:).  */
+7:
+#ifdef __ASSUME_PRIVATE_FUTEX
+	andl $FUTEX_PRIVATE_FLAG, %esi
+#else
+	andl %fs:PRIVATE_FUTEX, %esi
+#endif
+	orl $FUTEX_WAKE, %esi
+	movl $SYS_futex, %eax
+	/* %rdx should be 1 already from $FUTEX_WAKE_OP syscall.
+	movl $1, %edx  */
+	syscall
+
+	/* Unlock. */
+4:	LOCK
+#if cond_lock == 0
+	decl (%r8)
+#else
+	decl cond_lock(%r8)
+#endif
+	jne 5f
+
+6:	xorl %eax, %eax
+	retq
+
+	/* Initial locking failed.  lll flavor: private unless the
+	   condvar is process-shared (dep_mutex == -1).  */
+1:
+#if cond_lock != 0
+	addq $cond_lock, %rdi
+#endif
+	cmpq $-1, dep_mutex-cond_lock(%rdi)
+	movl $LLL_PRIVATE, %eax
+	movl $LLL_SHARED, %esi
+	cmovne %eax, %esi
+	callq __lll_lock_wait
+#if cond_lock != 0
+	subq $cond_lock, %rdi
+#endif
+	jmp 2b
+
+	/* Unlock in loop requires wakeup. */
+5:
+	movq %r8, %rdi
+#if cond_lock != 0
+	addq $cond_lock, %rdi
+#endif
+	cmpq $-1, dep_mutex-cond_lock(%rdi)
+	movl $LLL_PRIVATE, %eax
+	movl $LLL_SHARED, %esi
+	cmovne %eax, %esi
+	callq __lll_unlock_wake
+	jmp 6b
+	.size __pthread_cond_signal, .-__pthread_cond_signal
+weak_alias(__pthread_cond_signal, pthread_cond_signal)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
new file mode 100644
index 000000000..bc5c0b39b
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
@@ -0,0 +1,803 @@
+/* Copyright (C) 2002-2005, 2007, 2009 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelcond.h>
+#include <pthread-pi-defines.h>
+#include <pthread-errnos.h>
+#include <bits/kernel-features.h>
+#include <tcb-offsets.h>
+
+/* For the calculation see asm/vsyscall.h. */
+#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
+
+
+ .text
+
+
+/* int pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
+			       const struct timespec *abstime)
+
+   Register conventions used throughout:
+     %r13  - abstime pointer
+     %r12d - snapshot of cond_futex taken under the internal lock
+     %r14  - futex syscall result (kernel negated-errno convention),
+	     later the value returned to the caller
+     %r15d - nonzero when FUTEX_WAIT_REQUEUE_PI was used, i.e. the
+	     kernel has already locked the mutex for us
+   NOTE(review): the "timeout value" slot at rsp+32 in the diagram
+   below exists only in the !__ASSUME_FUTEX_CLOCK_REALTIME build
+   (FRAME_SIZE 48), where the .Lreltmo relative-timeout path is
+   compiled in.  */
+	.globl __pthread_cond_timedwait
+	.type __pthread_cond_timedwait, @function
+	.align 16
+__pthread_cond_timedwait:
+.LSTARTCODE:
+	cfi_startproc
+
+	pushq %r12
+	cfi_adjust_cfa_offset(8)
+	cfi_rel_offset(%r12, 0)
+	pushq %r13
+	cfi_adjust_cfa_offset(8)
+	cfi_rel_offset(%r13, 0)
+	pushq %r14
+	cfi_adjust_cfa_offset(8)
+	cfi_rel_offset(%r14, 0)
+	pushq %r15
+	cfi_adjust_cfa_offset(8)
+	cfi_rel_offset(%r15, 0)
+#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
+# define FRAME_SIZE 32
+#else
+# define FRAME_SIZE 48
+#endif
+	subq $FRAME_SIZE, %rsp
+	cfi_adjust_cfa_offset(FRAME_SIZE)
+	cfi_remember_state
+
+	/* Reject tv_nsec outside [0, 1e9) with EINVAL.  */
+	cmpq $1000000000, 8(%rdx)
+	movl $EINVAL, %eax
+	jae 48f
+
+	/* Stack frame:
+
+	   rsp + 48
+		    +--------------------------+
+	   rsp + 32 | timeout value	       |
+		    +--------------------------+
+	   rsp + 24 | old wake_seq value       |
+		    +--------------------------+
+	   rsp + 16 | mutex pointer	       |
+		    +--------------------------+
+	   rsp +  8 | condvar pointer	       |
+		    +--------------------------+
+	   rsp +  4 | old broadcast_seq value  |
+		    +--------------------------+
+	   rsp +  0 | old cancellation mode    |
+		    +--------------------------+
+	*/
+
+	cmpq $-1, dep_mutex(%rdi)
+
+	/* Prepare structure passed to cancellation handler. */
+	movq %rdi, 8(%rsp)
+	movq %rsi, 16(%rsp)
+	movq %rdx, %r13
+
+	/* Remember the mutex for signal/broadcast, except for pshared
+	   condvars where dep_mutex stays -1.  */
+	je 22f
+	movq %rsi, dep_mutex(%rdi)
+
+22:
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+# ifdef __PIC__
+	cmpl $0, __have_futex_clock_realtime(%rip)
+# else
+	cmpl $0, __have_futex_clock_realtime
+# endif
+	je .Lreltmo
+#endif
+
+	/* Get internal lock. */
+	movl $1, %esi
+	xorl %eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %esi, (%rdi)
+#else
+	cmpxchgl %esi, cond_lock(%rdi)
+#endif
+	jnz 31f
+
+	/* Unlock the mutex. */
+32:	movq 16(%rsp), %rdi
+	xorl %esi, %esi
+	callq __pthread_mutex_unlock_usercnt
+
+	testl %eax, %eax
+	jne 46f
+
+	/* Register this thread as a waiter.  */
+	movq 8(%rsp), %rdi
+	incq total_seq(%rdi)
+	incl cond_futex(%rdi)
+	addl $(1 << nwaiters_shift), cond_nwaiters(%rdi)
+
+	/* Get and store current wakeup_seq value. */
+	movq 8(%rsp), %rdi
+	movq wakeup_seq(%rdi), %r9
+	movl broadcast_seq(%rdi), %edx
+	movq %r9, 24(%rsp)
+	movl %edx, 4(%rsp)
+
+	/* Snapshot the futex value to wait on.  */
+38:	movl cond_futex(%rdi), %r12d
+
+	/* Unlock. */
+	LOCK
+#if cond_lock == 0
+	decl (%rdi)
+#else
+	decl cond_lock(%rdi)
+#endif
+	jne 33f
+
+.LcleanupSTART1:
+34:	callq __pthread_enable_asynccancel
+	movl %eax, (%rsp)
+
+	movq %r13, %r10
+	movl $FUTEX_WAIT_BITSET, %esi
+	cmpq $-1, dep_mutex(%rdi)
+	je 60f
+
+	movq dep_mutex(%rdi), %r8
+	/* Requeue to a non-robust PI mutex if the PI bit is set and
+	   the robust bit is not set. */
+	movl MUTEX_KIND(%r8), %eax
+	andl $(ROBUST_BIT|PI_BIT), %eax
+	cmpl $PI_BIT, %eax
+	jne 61f
+
+	movl $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
+	xorl %eax, %eax
+	/* The following only works like this because we only support
+	   two clocks, represented using a single bit. */
+	testl $1, cond_nwaiters(%rdi)
+	movl $FUTEX_CLOCK_REALTIME, %edx
+	cmove %edx, %eax
+	orl %eax, %esi
+	movq %r12, %rdx
+	addq $cond_futex, %rdi
+	movl $SYS_futex, %eax
+	syscall
+
+	/* Record that the kernel takes care of locking the mutex.  */
+	movl $1, %r15d
+#ifdef __ASSUME_REQUEUE_PI
+	jmp 62f
+#else
+	cmpq $-4095, %rax
+	jnae 62f
+
+	subq $cond_futex, %rdi
+#endif
+
+61:	movl $(FUTEX_WAIT_BITSET|FUTEX_PRIVATE_FLAG), %esi
+60:	xorl %r15d, %r15d
+	xorl %eax, %eax
+	/* The following only works like this because we only support
+	   two clocks, represented using a single bit. */
+	testl $1, cond_nwaiters(%rdi)
+	movl $FUTEX_CLOCK_REALTIME, %edx
+	movl $0xffffffff, %r9d
+	cmove %edx, %eax
+	orl %eax, %esi
+	movq %r12, %rdx
+	addq $cond_futex, %rdi
+	movl $SYS_futex, %eax
+	syscall
+	/* Save the futex wait result for the checks below.  */
+62:	movq %rax, %r14
+
+	movl (%rsp), %edi
+	callq __pthread_disable_asynccancel
+.LcleanupEND1:
+
+	/* Lock. */
+	movq 8(%rsp), %rdi
+	movl $1, %esi
+	xorl %eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %esi, (%rdi)
+#else
+	cmpxchgl %esi, cond_lock(%rdi)
+#endif
+	jne 35f
+
+	/* Decide, by comparing the sequence counters against the saved
+	   snapshots, whether we were broadcast (53), properly woken
+	   (39), timed out (99) or must wait again (38b).  */
+36:	movl broadcast_seq(%rdi), %edx
+
+	movq woken_seq(%rdi), %rax
+
+	movq wakeup_seq(%rdi), %r9
+
+	cmpl 4(%rsp), %edx
+	jne 53f
+
+	cmpq 24(%rsp), %r9
+	jbe 45f
+
+	cmpq %rax, %r9
+	ja 39f
+
+45:	cmpq $-ETIMEDOUT, %r14
+	jne 38b
+
+	/* Timeout: consume a wakeup slot ourselves and report
+	   ETIMEDOUT.  */
+99:	incq wakeup_seq(%rdi)
+	incl cond_futex(%rdi)
+	movl $ETIMEDOUT, %r14d
+	jmp 44f
+
+53:	xorq %r14, %r14
+	jmp 54f
+
+39:	xorq %r14, %r14
+44:	incq woken_seq(%rdi)
+
+54:	subl $(1 << nwaiters_shift), cond_nwaiters(%rdi)
+
+	/* Wake up a thread which wants to destroy the condvar object. */
+	cmpq $0xffffffffffffffff, total_seq(%rdi)
+	jne 55f
+	movl cond_nwaiters(%rdi), %eax
+	andl $~((1 << nwaiters_shift) - 1), %eax
+	jne 55f
+
+	addq $cond_nwaiters, %rdi
+	cmpq $-1, dep_mutex-cond_nwaiters(%rdi)
+	movl $1, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl $FUTEX_WAKE, %eax
+	movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+	cmove %eax, %esi
+#else
+	movl $0, %eax
+	movl %fs:PRIVATE_FUTEX, %esi
+	cmove %eax, %esi
+	orl $FUTEX_WAKE, %esi
+#endif
+	movl $SYS_futex, %eax
+	syscall
+	subq $cond_nwaiters, %rdi
+
+55:	LOCK
+#if cond_lock == 0
+	decl (%rdi)
+#else
+	decl cond_lock(%rdi)
+#endif
+	jne 40f
+
+	/* If requeue_pi is used the kernel performs the locking of the
+	   mutex. */
+41:	movq 16(%rsp), %rdi
+	testl %r15d, %r15d
+	jnz 64f
+
+	callq __pthread_mutex_cond_lock
+
+	/* If relocking succeeded (%rax == 0) return the wait status
+	   in %r14, otherwise the relock error.  */
+63:	testq %rax, %rax
+	cmoveq %r14, %rax
+
+	/* Common epilogue; %eax holds the result.  */
+48:	addq $FRAME_SIZE, %rsp
+	cfi_adjust_cfa_offset(-FRAME_SIZE)
+	popq %r15
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r15)
+	popq %r14
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r14)
+	popq %r13
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r13)
+	popq %r12
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r12)
+
+	retq
+
+	cfi_restore_state
+
+64:	callq __pthread_mutex_cond_lock_adjust
+	movq %r14, %rax
+	jmp 48b
+
+	/* Initial locking failed. */
+31:
+#if cond_lock != 0
+	addq $cond_lock, %rdi
+#endif
+	cmpq $-1, dep_mutex-cond_lock(%rdi)
+	movl $LLL_PRIVATE, %eax
+	movl $LLL_SHARED, %esi
+	cmovne %eax, %esi
+	callq __lll_lock_wait
+	jmp 32b
+
+	/* Unlock in loop requires wakeup. */
+33:
+#if cond_lock != 0
+	addq $cond_lock, %rdi
+#endif
+	cmpq $-1, dep_mutex-cond_lock(%rdi)
+	movl $LLL_PRIVATE, %eax
+	movl $LLL_SHARED, %esi
+	cmovne %eax, %esi
+	callq __lll_unlock_wake
+	jmp 34b
+
+	/* Locking in loop failed. */
+35:
+#if cond_lock != 0
+	addq $cond_lock, %rdi
+#endif
+	cmpq $-1, dep_mutex-cond_lock(%rdi)
+	movl $LLL_PRIVATE, %eax
+	movl $LLL_SHARED, %esi
+	cmovne %eax, %esi
+	callq __lll_lock_wait
+#if cond_lock != 0
+	subq $cond_lock, %rdi
+#endif
+	jmp 36b
+
+	/* Unlock after loop requires wakeup. */
+40:
+#if cond_lock != 0
+	addq $cond_lock, %rdi
+#endif
+	cmpq $-1, dep_mutex-cond_lock(%rdi)
+	movl $LLL_PRIVATE, %eax
+	movl $LLL_SHARED, %esi
+	cmovne %eax, %esi
+	callq __lll_unlock_wake
+	jmp 41b
+
+	/* The initial unlocking of the mutex failed.  Undo the internal
+	   lock and propagate the mutex error in %rax.  */
+46:	movq 8(%rsp), %rdi
+	movq %rax, (%rsp)
+	LOCK
+#if cond_lock == 0
+	decl (%rdi)
+#else
+	decl cond_lock(%rdi)
+#endif
+	jne 47f
+
+#if cond_lock != 0
+	addq $cond_lock, %rdi
+#endif
+	cmpq $-1, dep_mutex-cond_lock(%rdi)
+	movl $LLL_PRIVATE, %eax
+	movl $LLL_SHARED, %esi
+	cmovne %eax, %esi
+	callq __lll_unlock_wake
+
+47:	movq (%rsp), %rax
+	jmp 48b
+
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+	/* Kernel lacks FUTEX_CLOCK_REALTIME: read the current time,
+	   convert abstime to a relative timeout at rsp+32, and use a
+	   plain (timed) FUTEX_WAIT.  */
+.Lreltmo:
+	xorl %r15d, %r15d
+
+	/* Get internal lock. */
+	movl $1, %esi
+	xorl %eax, %eax
+	LOCK
+# if cond_lock == 0
+	cmpxchgl %esi, (%rdi)
+# else
+	cmpxchgl %esi, cond_lock(%rdi)
+# endif
+	jnz 1f
+
+	/* Unlock the mutex. */
+2:	movq 16(%rsp), %rdi
+	xorl %esi, %esi
+	callq __pthread_mutex_unlock_usercnt
+
+	testl %eax, %eax
+	jne 46b
+
+	movq 8(%rsp), %rdi
+	incq total_seq(%rdi)
+	incl cond_futex(%rdi)
+	addl $(1 << nwaiters_shift), cond_nwaiters(%rdi)
+
+	/* Get and store current wakeup_seq value. */
+	movq 8(%rsp), %rdi
+	movq wakeup_seq(%rdi), %r9
+	movl broadcast_seq(%rdi), %edx
+	movq %r9, 24(%rsp)
+	movl %edx, 4(%rsp)
+
+	/* Get the current time. */
+8:
+# ifdef __NR_clock_gettime
+	/* Get the clock number.  Note that the field in the condvar
+	   structure stores the number minus 1. */
+	movq 8(%rsp), %rdi
+	movl cond_nwaiters(%rdi), %edi
+	andl $((1 << nwaiters_shift) - 1), %edi
+	/* Only clocks 0 and 1 are allowed so far.  Both are handled in the
+	   kernel. */
+	leaq 32(%rsp), %rsi
+# ifdef SHARED
+	movq __vdso_clock_gettime@GOTPCREL(%rip), %rax
+	movq (%rax), %rax
+	PTR_DEMANGLE (%rax)
+	jz 26f
+	call *%rax
+	jmp 27f
+# endif
+26:	movl $__NR_clock_gettime, %eax
+	syscall
+27:
+# ifndef __ASSUME_POSIX_TIMERS
+	cmpq $-ENOSYS, %rax
+	je 19f
+# endif
+
+	/* Compute relative timeout. */
+	movq (%r13), %rcx
+	movq 8(%r13), %rdx
+	subq 32(%rsp), %rcx
+	subq 40(%rsp), %rdx
+# else
+	leaq 24(%rsp), %rdi
+	xorl %esi, %esi
+	movq $VSYSCALL_ADDR_vgettimeofday, %rax
+	callq *%rax
+
+	/* Compute relative timeout. */
+	movq 40(%rsp), %rax
+	movl $1000, %edx
+	mul %rdx		/* Milli seconds to nano seconds.  */
+	movq (%r13), %rcx
+	movq 8(%r13), %rdx
+	subq 32(%rsp), %rcx
+	subq %rax, %rdx
+# endif
+	/* Normalize a negative nanosecond difference.  */
+	jns 12f
+	addq $1000000000, %rdx
+	decq %rcx
+12:	testq %rcx, %rcx
+	movq 8(%rsp), %rdi
+	movq $-ETIMEDOUT, %r14
+	js 6f
+
+	/* Store relative timeout. */
+21:	movq %rcx, 32(%rsp)
+	movq %rdx, 40(%rsp)
+
+	movl cond_futex(%rdi), %r12d
+
+	/* Unlock. */
+	LOCK
+# if cond_lock == 0
+	decl (%rdi)
+# else
+	decl cond_lock(%rdi)
+# endif
+	jne 3f
+
+.LcleanupSTART2:
+4:	callq __pthread_enable_asynccancel
+	movl %eax, (%rsp)
+
+	leaq 32(%rsp), %r10
+	cmpq $-1, dep_mutex(%rdi)
+	movq %r12, %rdx
+# ifdef __ASSUME_PRIVATE_FUTEX
+	movl $FUTEX_WAIT, %eax
+	movl $(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), %esi
+	cmove %eax, %esi
+# else
+	movl $0, %eax
+	movl %fs:PRIVATE_FUTEX, %esi
+	cmove %eax, %esi
+# if FUTEX_WAIT != 0
+	orl $FUTEX_WAIT, %esi
+# endif
+# endif
+	addq $cond_futex, %rdi
+	movl $SYS_futex, %eax
+	syscall
+	movq %rax, %r14
+
+	movl (%rsp), %edi
+	callq __pthread_disable_asynccancel
+.LcleanupEND2:
+
+	/* Lock. */
+	movq 8(%rsp), %rdi
+	movl $1, %esi
+	xorl %eax, %eax
+	LOCK
+# if cond_lock == 0
+	cmpxchgl %esi, (%rdi)
+# else
+	cmpxchgl %esi, cond_lock(%rdi)
+# endif
+	jne 5f
+
+	/* Same wakeup checks as label 36 above, sharing its exit
+	   labels.  */
+6:	movl broadcast_seq(%rdi), %edx
+
+	movq woken_seq(%rdi), %rax
+
+	movq wakeup_seq(%rdi), %r9
+
+	cmpl 4(%rsp), %edx
+	jne 53b
+
+	cmpq 24(%rsp), %r9
+	jbe 15f
+
+	cmpq %rax, %r9
+	ja 39b
+
+15:	cmpq $-ETIMEDOUT, %r14
+	jne 8b
+
+	jmp 99b
+
+	/* Initial locking failed. */
+1:
+# if cond_lock != 0
+	addq $cond_lock, %rdi
+# endif
+	cmpq $-1, dep_mutex-cond_lock(%rdi)
+	movl $LLL_PRIVATE, %eax
+	movl $LLL_SHARED, %esi
+	cmovne %eax, %esi
+	callq __lll_lock_wait
+	jmp 2b
+
+	/* Unlock in loop requires wakeup. */
+3:
+# if cond_lock != 0
+	addq $cond_lock, %rdi
+# endif
+	cmpq $-1, dep_mutex-cond_lock(%rdi)
+	movl $LLL_PRIVATE, %eax
+	movl $LLL_SHARED, %esi
+	cmovne %eax, %esi
+	callq __lll_unlock_wake
+	jmp 4b
+
+	/* Locking in loop failed. */
+5:
+# if cond_lock != 0
+	addq $cond_lock, %rdi
+# endif
+	cmpq $-1, dep_mutex-cond_lock(%rdi)
+	movl $LLL_PRIVATE, %eax
+	movl $LLL_SHARED, %esi
+	cmovne %eax, %esi
+	callq __lll_lock_wait
+# if cond_lock != 0
+	subq $cond_lock, %rdi
+# endif
+	jmp 6b
+
+# if defined __NR_clock_gettime && !defined __ASSUME_POSIX_TIMERS
+	/* clock_gettime not available. */
+19:	leaq 32(%rsp), %rdi
+	xorl %esi, %esi
+	movq $VSYSCALL_ADDR_vgettimeofday, %rax
+	callq *%rax
+
+	/* Compute relative timeout. */
+	movq 40(%rsp), %rax
+	movl $1000, %edx
+	mul %rdx		/* Milli seconds to nano seconds.  */
+	movq (%r13), %rcx
+	movq 8(%r13), %rdx
+	subq 32(%rsp), %rcx
+	subq %rax, %rdx
+	jns 20f
+	addq $1000000000, %rdx
+	decq %rcx
+20:	testq %rcx, %rcx
+	movq 8(%rsp), %rdi
+	movq $-ETIMEDOUT, %r14
+	js 6b
+	jmp 21b
+# endif
+#endif
+	.size __pthread_cond_timedwait, .-__pthread_cond_timedwait
+weak_alias(__pthread_cond_timedwait, pthread_cond_timedwait)
+
+
+/* Cancellation cleanup landing pad for the cancellable futex waits in
+   __pthread_cond_timedwait (wired up through the .gcc_except_table
+   entries below).  On entry %rax holds the _Unwind_Exception pointer
+   and %rsp still addresses the wait function's frame, so the slots at
+   8(%rsp)/16(%rsp) are the condvar/mutex pointers saved there.
+   NOTE(review): the stack diagram below does not list %r15 and its
+   48/56/64 labels do not line up with the FRAME_SIZE-relative restores
+   at the end of this function -- it looks stale; trust the code.  */
+	.align 16
+	.type __condvar_cleanup2, @function
+__condvar_cleanup2:
+	/* Stack frame:
+
+	   rsp + 72
+		    +--------------------------+
+	   rsp + 64 | %r12		       |
+		    +--------------------------+
+	   rsp + 56 | %r13		       |
+		    +--------------------------+
+	   rsp + 48 | %r14		       |
+		    +--------------------------+
+	   rsp + 24 | unused		       |
+		    +--------------------------+
+	   rsp + 16 | mutex pointer	       |
+		    +--------------------------+
+	   rsp +  8 | condvar pointer	       |
+		    +--------------------------+
+	   rsp +  4 | old broadcast_seq value  |
+		    +--------------------------+
+	   rsp +  0 | old cancellation mode    |
+		    +--------------------------+
+	*/
+
+	/* Park the exception object pointer for _Unwind_Resume.  */
+	movq %rax, 24(%rsp)
+
+	/* Get internal lock. */
+	movq 8(%rsp), %rdi
+	movl $1, %esi
+	xorl %eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %esi, (%rdi)
+#else
+	cmpxchgl %esi, cond_lock(%rdi)
+#endif
+	jz 1f
+
+#if cond_lock != 0
+	addq $cond_lock, %rdi
+#endif
+	cmpq $-1, dep_mutex-cond_lock(%rdi)
+	movl $LLL_PRIVATE, %eax
+	movl $LLL_SHARED, %esi
+	cmovne %eax, %esi
+	callq __lll_lock_wait
+#if cond_lock != 0
+	subq $cond_lock, %rdi
+#endif
+
+	/* If a broadcast happened meanwhile (broadcast_seq changed),
+	   skip the per-wakeup bookkeeping.  */
+1:	movl broadcast_seq(%rdi), %edx
+	cmpl 4(%rsp), %edx
+	jne 3f
+
+	/* We increment the wakeup_seq counter only if it is lower than
+	   total_seq.  If this is not the case the thread was woken and
+	   then canceled.  In this case we ignore the signal. */
+	movq total_seq(%rdi), %rax
+	cmpq wakeup_seq(%rdi), %rax
+	jbe 6f
+	incq wakeup_seq(%rdi)
+	incl cond_futex(%rdi)
+6:	incq woken_seq(%rdi)
+
+	/* Deregister this (cancelled) waiter.  */
+3:	subl $(1 << nwaiters_shift), cond_nwaiters(%rdi)
+
+	/* Wake up a thread which wants to destroy the condvar object.
+	   %r12 != 0 afterwards records that everybody was woken.  */
+	xorq %r12, %r12
+	cmpq $0xffffffffffffffff, total_seq(%rdi)
+	jne 4f
+	movl cond_nwaiters(%rdi), %eax
+	andl $~((1 << nwaiters_shift) - 1), %eax
+	jne 4f
+
+	cmpq $-1, dep_mutex(%rdi)
+	leaq cond_nwaiters(%rdi), %rdi
+	movl $1, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl $FUTEX_WAKE, %eax
+	movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+	cmove %eax, %esi
+#else
+	movl $0, %eax
+	movl %fs:PRIVATE_FUTEX, %esi
+	cmove %eax, %esi
+	orl $FUTEX_WAKE, %esi
+#endif
+	movl $SYS_futex, %eax
+	syscall
+	subq $cond_nwaiters, %rdi
+	movl $1, %r12d
+
+	/* Release the internal lock.  */
+4:	LOCK
+#if cond_lock == 0
+	decl (%rdi)
+#else
+	decl cond_lock(%rdi)
+#endif
+	je 2f
+#if cond_lock != 0
+	addq $cond_lock, %rdi
+#endif
+	cmpq $-1, dep_mutex-cond_lock(%rdi)
+	movl $LLL_PRIVATE, %eax
+	movl $LLL_SHARED, %esi
+	cmovne %eax, %esi
+	callq __lll_unlock_wake
+
+	/* Wake up all waiters to make sure no signal gets lost. */
+2:	testq %r12, %r12
+	jnz 5f
+	addq $cond_futex, %rdi
+	cmpq $-1, dep_mutex-cond_futex(%rdi)
+	movl $0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl $FUTEX_WAKE, %eax
+	movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+	cmove %eax, %esi
+#else
+	movl $0, %eax
+	movl %fs:PRIVATE_FUTEX, %esi
+	cmove %eax, %esi
+	orl $FUTEX_WAKE, %esi
+#endif
+	movl $SYS_futex, %eax
+	syscall
+
+	/* Re-acquire the user mutex (cancellation semantics require the
+	   mutex to be held), restore callee-saved registers from the
+	   wait function's frame and resume unwinding.  */
+5:	movq 16(%rsp), %rdi
+	callq __pthread_mutex_cond_lock
+
+	movq 24(%rsp), %rdi
+	movq FRAME_SIZE(%rsp), %r15
+	movq FRAME_SIZE+8(%rsp), %r14
+	movq FRAME_SIZE+16(%rsp), %r13
+	movq FRAME_SIZE+24(%rsp), %r12
+.LcallUR:
+	call _Unwind_Resume@PLT
+	hlt
+.LENDCODE:
+	cfi_endproc
+	.size __condvar_cleanup2, .-__condvar_cleanup2
+
+
+/* Language-specific data area (LSDA) consumed by the unwinder's
+   personality routine: each call-site entry maps a cancellable region
+   (the futex waits between .LcleanupSTART*/.LcleanupEND*) to the
+   __condvar_cleanup2 landing pad; the final entry covers the
+   _Unwind_Resume call itself with no landing pad.  */
+	.section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+	.byte DW_EH_PE_omit # @LPStart format
+	.byte DW_EH_PE_omit # @TType format
+	.byte DW_EH_PE_uleb128 # call-site format
+	.uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+	.uleb128 .LcleanupSTART1-.LSTARTCODE
+	.uleb128 .LcleanupEND1-.LcleanupSTART1
+	.uleb128 __condvar_cleanup2-.LSTARTCODE
+	.uleb128 0
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+	.uleb128 .LcleanupSTART2-.LSTARTCODE
+	.uleb128 .LcleanupEND2-.LcleanupSTART2
+	.uleb128 __condvar_cleanup2-.LSTARTCODE
+	.uleb128 0
+#endif
+	.uleb128 .LcallUR-.LSTARTCODE
+	.uleb128 .LENDCODE-.LcallUR
+	.uleb128 0
+	.uleb128 0
+.Lcstend:
+
+
+/* Indirect, overridable reference to the C personality routine so the
+   shared object does not hard-bind __gcc_personality_v0.  */
+#ifdef SHARED
+	.hidden DW.ref.__gcc_personality_v0
+	.weak DW.ref.__gcc_personality_v0
+	.section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+	.align 8
+	.type DW.ref.__gcc_personality_v0, @object
+	.size DW.ref.__gcc_personality_v0, 8
+DW.ref.__gcc_personality_v0:
+	.quad __gcc_personality_v0
+#endif
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
new file mode 100644
index 000000000..a44e7a756
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
@@ -0,0 +1,486 @@
+/* Copyright (C) 2002-2007, 2009 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelcond.h>
+#include <tcb-offsets.h>
+#include <pthread-pi-defines.h>
+
+#include <bits/kernel-features.h>
+
+
+ .text
+
+/* int pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex) */
+ .globl __pthread_cond_wait
+ .type __pthread_cond_wait, @function
+ .align 16
+__pthread_cond_wait:
+.LSTARTCODE:
+ cfi_startproc
+
+#define FRAME_SIZE 32
+ leaq -FRAME_SIZE(%rsp), %rsp
+ cfi_adjust_cfa_offset(FRAME_SIZE)
+
+ /* Stack frame:
+
+ rsp + 32
+ +--------------------------+
+ rsp + 24 | old wake_seq value |
+ +--------------------------+
+ rsp + 16 | mutex pointer |
+ +--------------------------+
+ rsp + 8 | condvar pointer |
+ +--------------------------+
+ rsp + 4 | old broadcast_seq value |
+ +--------------------------+
+ rsp + 0 | old cancellation mode |
+ +--------------------------+
+ */
+
+ cmpq $-1, dep_mutex(%rdi)
+
+ /* Prepare structure passed to cancellation handler. */
+ movq %rdi, 8(%rsp)
+ movq %rsi, 16(%rsp)
+
+ je 15f
+ movq %rsi, dep_mutex(%rdi)
+
+ /* Get internal lock. */
+15: movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if cond_lock == 0
+ cmpxchgl %esi, (%rdi)
+#else
+ cmpxchgl %esi, cond_lock(%rdi)
+#endif
+ jne 1f
+
+ /* Unlock the mutex. */
+2: movq 16(%rsp), %rdi
+ xorl %esi, %esi
+ callq __pthread_mutex_unlock_usercnt
+
+ testl %eax, %eax
+ jne 12f
+
+ movq 8(%rsp), %rdi
+ incq total_seq(%rdi)
+ incl cond_futex(%rdi)
+ addl $(1 << nwaiters_shift), cond_nwaiters(%rdi)
+
+ /* Get and store current wakeup_seq value. */
+ movq 8(%rsp), %rdi
+ movq wakeup_seq(%rdi), %r9
+ movl broadcast_seq(%rdi), %edx
+ movq %r9, 24(%rsp)
+ movl %edx, 4(%rsp)
+
+ /* Unlock. */
+8: movl cond_futex(%rdi), %edx
+ LOCK
+#if cond_lock == 0
+ decl (%rdi)
+#else
+ decl cond_lock(%rdi)
+#endif
+ jne 3f
+
+.LcleanupSTART:
+4: callq __pthread_enable_asynccancel
+ movl %eax, (%rsp)
+
+ xorq %r10, %r10
+ cmpq $-1, dep_mutex(%rdi)
+ leaq cond_futex(%rdi), %rdi
+ movl $FUTEX_WAIT, %esi
+ je 60f
+
+ movq dep_mutex-cond_futex(%rdi), %r8
+ /* Requeue to a non-robust PI mutex if the PI bit is set and
+ the robust bit is not set. */
+ movl MUTEX_KIND(%r8), %eax
+ andl $(ROBUST_BIT|PI_BIT), %eax
+ cmpl $PI_BIT, %eax
+ jne 61f
+
+ movl $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
+ movl $SYS_futex, %eax
+ syscall
+
+ movl $1, %r8d
+#ifdef __ASSUME_REQUEUE_PI
+ jmp 62f
+#else
+ cmpq $-4095, %rax
+ jnae 62f
+
+# ifndef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_WAIT, %esi
+# endif
+#endif
+
+61:
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), %esi
+#else
+ orl %fs:PRIVATE_FUTEX, %esi
+#endif
+60: xorl %r8d, %r8d
+ movl $SYS_futex, %eax
+ syscall
+
+62: movl (%rsp), %edi
+ callq __pthread_disable_asynccancel
+.LcleanupEND:
+
+ /* Lock. */
+ movq 8(%rsp), %rdi
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if cond_lock == 0
+ cmpxchgl %esi, (%rdi)
+#else
+ cmpxchgl %esi, cond_lock(%rdi)
+#endif
+ jnz 5f
+
+6: movl broadcast_seq(%rdi), %edx
+
+ movq woken_seq(%rdi), %rax
+
+ movq wakeup_seq(%rdi), %r9
+
+ cmpl 4(%rsp), %edx
+ jne 16f
+
+ cmpq 24(%rsp), %r9
+ jbe 8b
+
+ cmpq %rax, %r9
+ jna 8b
+
+ incq woken_seq(%rdi)
+
+ /* Unlock */
+16: subl $(1 << nwaiters_shift), cond_nwaiters(%rdi)
+
+ /* Wake up a thread which wants to destroy the condvar object. */
+ cmpq $0xffffffffffffffff, total_seq(%rdi)
+ jne 17f
+ movl cond_nwaiters(%rdi), %eax
+ andl $~((1 << nwaiters_shift) - 1), %eax
+ jne 17f
+
+ addq $cond_nwaiters, %rdi
+ cmpq $-1, dep_mutex-cond_nwaiters(%rdi)
+ movl $1, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_WAKE, %eax
+ movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+ cmove %eax, %esi
+#else
+ movl $0, %eax
+ movl %fs:PRIVATE_FUTEX, %esi
+ cmove %eax, %esi
+ orl $FUTEX_WAKE, %esi
+#endif
+ movl $SYS_futex, %eax
+ syscall
+ subq $cond_nwaiters, %rdi
+
+17: LOCK
+#if cond_lock == 0
+ decl (%rdi)
+#else
+ decl cond_lock(%rdi)
+#endif
+ jne 10f
+
+ /* If requeue_pi is used the kernel performs the locking of the
+ mutex. */
+11: movq 16(%rsp), %rdi
+ testl %r8d, %r8d
+ jnz 18f
+
+ callq __pthread_mutex_cond_lock
+
+14: leaq FRAME_SIZE(%rsp), %rsp
+ cfi_adjust_cfa_offset(-FRAME_SIZE)
+
+ /* We return the result of the mutex_lock operation. */
+ retq
+
+ cfi_adjust_cfa_offset(FRAME_SIZE)
+
+18: callq __pthread_mutex_cond_lock_adjust
+ xorl %eax, %eax
+ jmp 14b
+
+ /* Initial locking failed. */
+1:
+#if cond_lock != 0
+ addq $cond_lock, %rdi
+#endif
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ callq __lll_lock_wait
+ jmp 2b
+
+ /* Unlock in loop requires wakeup. */
+3:
+#if cond_lock != 0
+ addq $cond_lock, %rdi
+#endif
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ /* The call preserves %rdx. */
+ callq __lll_unlock_wake
+#if cond_lock != 0
+ subq $cond_lock, %rdi
+#endif
+ jmp 4b
+
+ /* Locking in loop failed. */
+5:
+#if cond_lock != 0
+ addq $cond_lock, %rdi
+#endif
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ callq __lll_lock_wait
+#if cond_lock != 0
+ subq $cond_lock, %rdi
+#endif
+ jmp 6b
+
+ /* Unlock after loop requires wakeup. */
+10:
+#if cond_lock != 0
+ addq $cond_lock, %rdi
+#endif
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ callq __lll_unlock_wake
+ jmp 11b
+
+ /* The initial unlocking of the mutex failed. */
+12: movq %rax, %r10
+ movq 8(%rsp), %rdi
+ LOCK
+#if cond_lock == 0
+ decl (%rdi)
+#else
+ decl cond_lock(%rdi)
+#endif
+ je 13f
+
+#if cond_lock != 0
+ addq $cond_lock, %rdi
+#endif
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ callq __lll_unlock_wake
+
+13: movq %r10, %rax
+ jmp 14b
+ .size __pthread_cond_wait, .-__pthread_cond_wait
+weak_alias(__pthread_cond_wait, pthread_cond_wait)
+
+
+ .align 16
+ .type __condvar_cleanup1, @function
+ .globl __condvar_cleanup1
+ .hidden __condvar_cleanup1
+__condvar_cleanup1:
+ /* Stack frame:
+
+ rsp + 32
+ +--------------------------+
+ rsp + 24 | unused |
+ +--------------------------+
+ rsp + 16 | mutex pointer |
+ +--------------------------+
+ rsp + 8 | condvar pointer |
+ +--------------------------+
+ rsp + 4 | old broadcast_seq value |
+ +--------------------------+
+ rsp + 0 | old cancellation mode |
+ +--------------------------+
+ */
+
+ movq %rax, 24(%rsp)
+
+ /* Get internal lock. */
+ movq 8(%rsp), %rdi
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if cond_lock == 0
+ cmpxchgl %esi, (%rdi)
+#else
+ cmpxchgl %esi, cond_lock(%rdi)
+#endif
+ jz 1f
+
+#if cond_lock != 0
+ addq $cond_lock, %rdi
+#endif
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ callq __lll_lock_wait
+#if cond_lock != 0
+ subq $cond_lock, %rdi
+#endif
+
+1: movl broadcast_seq(%rdi), %edx
+ cmpl 4(%rsp), %edx
+ jne 3f
+
+ /* We increment the wakeup_seq counter only if it is lower than
+ total_seq. If this is not the case the thread was woken and
+ then canceled. In this case we ignore the signal. */
+ movq total_seq(%rdi), %rax
+ cmpq wakeup_seq(%rdi), %rax
+ jbe 6f
+ incq wakeup_seq(%rdi)
+ incl cond_futex(%rdi)
+6: incq woken_seq(%rdi)
+
+3: subl $(1 << nwaiters_shift), cond_nwaiters(%rdi)
+
+ /* Wake up a thread which wants to destroy the condvar object. */
+ xorl %ecx, %ecx
+ cmpq $0xffffffffffffffff, total_seq(%rdi)
+ jne 4f
+ movl cond_nwaiters(%rdi), %eax
+ andl $~((1 << nwaiters_shift) - 1), %eax
+ jne 4f
+
+ cmpq $-1, dep_mutex(%rdi)
+ leaq cond_nwaiters(%rdi), %rdi
+ movl $1, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_WAKE, %eax
+ movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+ cmove %eax, %esi
+#else
+ movl $0, %eax
+ movl %fs:PRIVATE_FUTEX, %esi
+ cmove %eax, %esi
+ orl $FUTEX_WAKE, %esi
+#endif
+ movl $SYS_futex, %eax
+ syscall
+ subq $cond_nwaiters, %rdi
+ movl $1, %ecx
+
+4: LOCK
+#if cond_lock == 0
+ decl (%rdi)
+#else
+ decl cond_lock(%rdi)
+#endif
+ je 2f
+#if cond_lock != 0
+ addq $cond_lock, %rdi
+#endif
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ /* The call preserves %rcx. */
+ callq __lll_unlock_wake
+
+ /* Wake up all waiters to make sure no signal gets lost. */
+2: testl %ecx, %ecx
+ jnz 5f
+ addq $cond_futex, %rdi
+ cmpq $-1, dep_mutex-cond_futex(%rdi)
+ movl $0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_WAKE, %eax
+ movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+ cmove %eax, %esi
+#else
+ movl $0, %eax
+ movl %fs:PRIVATE_FUTEX, %esi
+ cmove %eax, %esi
+ orl $FUTEX_WAKE, %esi
+#endif
+ movl $SYS_futex, %eax
+ syscall
+
+5: movq 16(%rsp), %rdi
+ callq __pthread_mutex_cond_lock
+
+ movq 24(%rsp), %rdi
+.LcallUR:
+ call _Unwind_Resume@PLT
+ hlt
+.LENDCODE:
+ cfi_endproc
+ .size __condvar_cleanup1, .-__condvar_cleanup1
+
+
+ .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+ .byte DW_EH_PE_omit # @LPStart format
+ .byte DW_EH_PE_omit # @TType format
+ .byte DW_EH_PE_uleb128 # call-site format
+ .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+ .uleb128 .LcleanupSTART-.LSTARTCODE
+ .uleb128 .LcleanupEND-.LcleanupSTART
+ .uleb128 __condvar_cleanup1-.LSTARTCODE
+ .uleb128 0
+ .uleb128 .LcallUR-.LSTARTCODE
+ .uleb128 .LENDCODE-.LcallUR
+ .uleb128 0
+ .uleb128 0
+.Lcstend:
+
+
+#ifdef SHARED
+ .hidden DW.ref.__gcc_personality_v0
+ .weak DW.ref.__gcc_personality_v0
+ .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+ .align 8
+ .type DW.ref.__gcc_personality_v0, @object
+ .size DW.ref.__gcc_personality_v0, 8
+DW.ref.__gcc_personality_v0:
+ .quad __gcc_personality_v0
+#endif
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S
new file mode 100644
index 000000000..a808b9d7e
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S
@@ -0,0 +1,189 @@
+/* Copyright (C) 2002, 2003, 2005, 2007, 2009 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <bits/kernel-features.h>
+#include <tcb-offsets.h>
+#include <lowlevellock.h>
+
+
+ .comm __fork_generation, 4, 4
+
+ .text
+
+
+ .globl __pthread_once
+ .type __pthread_once,@function
+ .align 16
+__pthread_once:
+.LSTARTCODE:
+ cfi_startproc
+ testl $2, (%rdi)
+ jz 1f
+ xorl %eax, %eax
+ retq
+
+ /* Preserve the function pointer. */
+1: pushq %rsi
+ cfi_adjust_cfa_offset(8)
+ xorq %r10, %r10
+
+ /* Not yet initialized or initialization in progress.
+ Get the fork generation counter now. */
+6: movl (%rdi), %eax
+
+5: movl %eax, %edx
+
+ testl $2, %eax
+ jnz 4f
+
+ andl $3, %edx
+ orl __fork_generation(%rip), %edx
+ orl $1, %edx
+
+ LOCK
+ cmpxchgl %edx, (%rdi)
+ jnz 5b
+
+ /* Check whether another thread already runs the initializer. */
+ testl $1, %eax
+ jz 3f /* No -> do it. */
+
+ /* Check whether the initializer execution was interrupted
+ by a fork. */
+ xorl %edx, %eax
+ testl $0xfffffffc, %eax
+ jnz 3f /* Different for generation -> run initializer. */
+
+ /* Somebody else got here first. Wait. */
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_WAIT|FUTEX_PRIVATE_FLAG, %esi
+#else
+# if FUTEX_WAIT == 0
+ movl %fs:PRIVATE_FUTEX, %esi
+# else
+ movl $FUTEX_WAIT, %esi
+ orl %fs:PRIVATE_FUTEX, %esi
+# endif
+#endif
+ movl $SYS_futex, %eax
+ syscall
+ jmp 6b
+
+ /* Preserve the pointer to the control variable. */
+3: pushq %rdi
+ cfi_adjust_cfa_offset(8)
+ pushq %rdi
+ cfi_adjust_cfa_offset(8)
+
+.LcleanupSTART:
+ callq *16(%rsp)
+.LcleanupEND:
+
+ /* Get the control variable address back. */
+ popq %rdi
+ cfi_adjust_cfa_offset(-8)
+
+	/* Successful run of the initializer.  Signal that we are done.  */
+ LOCK
+ incl (%rdi)
+
+ addq $8, %rsp
+ cfi_adjust_cfa_offset(-8)
+
+ /* Wake up all other threads. */
+ movl $0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_WAKE|FUTEX_PRIVATE_FLAG, %esi
+#else
+ movl $FUTEX_WAKE, %esi
+ orl %fs:PRIVATE_FUTEX, %esi
+#endif
+ movl $SYS_futex, %eax
+ syscall
+
+4: addq $8, %rsp
+ cfi_adjust_cfa_offset(-8)
+ xorl %eax, %eax
+ retq
+ .size __pthread_once,.-__pthread_once
+
+
+ .globl __pthread_once_internal
+__pthread_once_internal = __pthread_once
+
+ .globl pthread_once
+pthread_once = __pthread_once
+
+
+ .type clear_once_control,@function
+ .align 16
+clear_once_control:
+ cfi_adjust_cfa_offset(3 * 8)
+ movq (%rsp), %rdi
+ movq %rax, %r8
+ movl $0, (%rdi)
+
+ movl $0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_WAKE|FUTEX_PRIVATE_FLAG, %esi
+#else
+ movl $FUTEX_WAKE, %esi
+ orl %fs:PRIVATE_FUTEX, %esi
+#endif
+ movl $SYS_futex, %eax
+ syscall
+
+ movq %r8, %rdi
+.LcallUR:
+ call _Unwind_Resume@PLT
+ hlt
+.LENDCODE:
+ cfi_endproc
+ .size clear_once_control,.-clear_once_control
+
+
+ .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+ .byte DW_EH_PE_omit # @LPStart format
+ .byte DW_EH_PE_omit # @TType format
+ .byte DW_EH_PE_uleb128 # call-site format
+ .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+ .uleb128 .LcleanupSTART-.LSTARTCODE
+ .uleb128 .LcleanupEND-.LcleanupSTART
+ .uleb128 clear_once_control-.LSTARTCODE
+ .uleb128 0
+ .uleb128 .LcallUR-.LSTARTCODE
+ .uleb128 .LENDCODE-.LcallUR
+ .uleb128 0
+ .uleb128 0
+.Lcstend:
+
+
+#ifdef SHARED
+ .hidden DW.ref.__gcc_personality_v0
+ .weak DW.ref.__gcc_personality_v0
+ .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+ .align 8
+ .type DW.ref.__gcc_personality_v0, @object
+ .size DW.ref.__gcc_personality_v0, 8
+DW.ref.__gcc_personality_v0:
+ .quad __gcc_personality_v0
+#endif
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S
new file mode 100644
index 000000000..f36e7a765
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S
@@ -0,0 +1,179 @@
+/* Copyright (C) 2002, 2003, 2005, 2007, 2009 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelrwlock.h>
+#include <pthread-errnos.h>
+#include <bits/kernel-features.h>
+#include <tcb-offsets.h>
+
+
+ .text
+
+ .globl __pthread_rwlock_rdlock
+ .type __pthread_rwlock_rdlock,@function
+ .align 16
+__pthread_rwlock_rdlock:
+ cfi_startproc
+ xorq %r10, %r10
+
+ /* Get the lock. */
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if MUTEX == 0
+ cmpxchgl %esi, (%rdi)
+#else
+ cmpxchgl %esi, MUTEX(%rdi)
+#endif
+ jnz 1f
+
+2: movl WRITER(%rdi), %eax
+ testl %eax, %eax
+ jne 14f
+ cmpl $0, WRITERS_QUEUED(%rdi)
+ je 5f
+ cmpl $0, FLAGS(%rdi)
+ je 5f
+
+3: incl READERS_QUEUED(%rdi)
+ je 4f
+
+ movl READERS_WAKEUP(%rdi), %edx
+
+ LOCK
+#if MUTEX == 0
+ decl (%rdi)
+#else
+ decl MUTEX(%rdi)
+#endif
+ jne 10f
+
+11:
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %esi
+ xorl PSHARED(%rdi), %esi
+#else
+# if FUTEX_WAIT == 0
+ movl PSHARED(%rdi), %esi
+# else
+ movl $FUTEX_WAIT, %esi
+ orl PSHARED(%rdi), %esi
+# endif
+ xorl %fs:PRIVATE_FUTEX, %esi
+#endif
+ addq $READERS_WAKEUP, %rdi
+ movl $SYS_futex, %eax
+ syscall
+
+ subq $READERS_WAKEUP, %rdi
+
+ /* Reget the lock. */
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if MUTEX == 0
+ cmpxchgl %esi, (%rdi)
+#else
+ cmpxchgl %esi, MUTEX(%rdi)
+#endif
+ jnz 12f
+
+13: decl READERS_QUEUED(%rdi)
+ jmp 2b
+
+5: xorl %edx, %edx
+ incl NR_READERS(%rdi)
+ je 8f
+9: LOCK
+#if MUTEX == 0
+ decl (%rdi)
+#else
+ decl MUTEX(%rdi)
+#endif
+ jne 6f
+7:
+
+ movq %rdx, %rax
+ retq
+
+1: movl PSHARED(%rdi), %esi
+#if MUTEX != 0
+ addq $MUTEX, %rdi
+#endif
+ callq __lll_lock_wait
+#if MUTEX != 0
+ subq $MUTEX, %rdi
+#endif
+ jmp 2b
+
+14: cmpl %fs:TID, %eax
+ jne 3b
+ /* Deadlock detected. */
+ movl $EDEADLK, %edx
+ jmp 9b
+
+6: movl PSHARED(%rdi), %esi
+#if MUTEX != 0
+ addq $MUTEX, %rdi
+#endif
+ callq __lll_unlock_wake
+#if MUTEX != 0
+ subq $MUTEX, %rdi
+#endif
+ jmp 7b
+
+ /* Overflow. */
+8: decl NR_READERS(%rdi)
+ movl $EAGAIN, %edx
+ jmp 9b
+
+ /* Overflow. */
+4: decl READERS_QUEUED(%rdi)
+ movl $EAGAIN, %edx
+ jmp 9b
+
+10: movl PSHARED(%rdi), %esi
+#if MUTEX != 0
+ addq $MUTEX, %rdi
+#endif
+ callq __lll_unlock_wake
+#if MUTEX != 0
+ subq $MUTEX, %rdi
+#endif
+ jmp 11b
+
+12: movl PSHARED(%rdi), %esi
+#if MUTEX != 0
+ addq $MUTEX, %rdi
+#endif
+ callq __lll_lock_wait
+#if MUTEX != 0
+ subq $MUTEX, %rdi
+#endif
+ jmp 13b
+ cfi_endproc
+ .size __pthread_rwlock_rdlock,.-__pthread_rwlock_rdlock
+
+ .globl pthread_rwlock_rdlock
+pthread_rwlock_rdlock = __pthread_rwlock_rdlock
+
+ .globl __pthread_rwlock_rdlock_internal
+__pthread_rwlock_rdlock_internal = __pthread_rwlock_rdlock
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S
new file mode 100644
index 000000000..3629ffbe5
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S
@@ -0,0 +1,276 @@
+/* Copyright (C) 2002-2005, 2007, 2009 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelrwlock.h>
+#include <pthread-errnos.h>
+#include <bits/kernel-features.h>
+#include <tcb-offsets.h>
+
+
+/* For the calculation see asm/vsyscall.h. */
+#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
+
+ .text
+
+ .globl pthread_rwlock_timedrdlock
+ .type pthread_rwlock_timedrdlock,@function
+ .align 16
+pthread_rwlock_timedrdlock:
+ cfi_startproc
+ pushq %r12
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r12, 0)
+ pushq %r13
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r13, 0)
+#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
+# define VALREG %edx
+#else
+ pushq %r14
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r14, 0)
+
+ subq $16, %rsp
+ cfi_adjust_cfa_offset(16)
+# define VALREG %r14d
+#endif
+
+ movq %rdi, %r12
+ movq %rsi, %r13
+
+ /* Get the lock. */
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if MUTEX == 0
+ cmpxchgl %esi, (%rdi)
+#else
+ cmpxchgl %esi, MUTEX(%rdi)
+#endif
+ jnz 1f
+
+2: movl WRITER(%r12), %eax
+ testl %eax, %eax
+ jne 14f
+ cmpl $0, WRITERS_QUEUED(%r12)
+ je 5f
+ cmpl $0, FLAGS(%r12)
+ je 5f
+
+ /* Check the value of the timeout parameter. */
+3: cmpq $1000000000, 8(%r13)
+ jae 19f
+
+ incl READERS_QUEUED(%r12)
+ je 4f
+
+ movl READERS_WAKEUP(%r12), VALREG
+
+ /* Unlock. */
+ LOCK
+#if MUTEX == 0
+ decl (%r12)
+#else
+ decl MUTEX(%r12)
+#endif
+ jne 10f
+
+11:
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+# ifdef __PIC__
+ cmpl $0, __have_futex_clock_realtime(%rip)
+# else
+ cmpl $0, __have_futex_clock_realtime
+# endif
+ je .Lreltmo
+#endif
+
+ movl $FUTEX_PRIVATE_FLAG|FUTEX_WAIT_BITSET|FUTEX_CLOCK_REALTIME, %esi
+ xorl PSHARED(%r12), %esi
+ movq %r13, %r10
+ movl $0xffffffff, %r9d
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+ movl %r14d, %edx
+#endif
+21: leaq READERS_WAKEUP(%r12), %rdi
+ movl $SYS_futex, %eax
+ syscall
+ movq %rax, %rdx
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+ .subsection 2
+.Lreltmo:
+ /* Get current time. */
+ movq %rsp, %rdi
+ xorl %esi, %esi
+ movq $VSYSCALL_ADDR_vgettimeofday, %rax
+ callq *%rax
+
+ /* Compute relative timeout. */
+ movq 8(%rsp), %rax
+ movl $1000, %edi
+ mul %rdi /* Milli seconds to nano seconds. */
+ movq (%r13), %rcx
+ movq 8(%r13), %rdi
+ subq (%rsp), %rcx
+ subq %rax, %rdi
+ jns 15f
+ addq $1000000000, %rdi
+ decq %rcx
+15: testq %rcx, %rcx
+ js 16f /* Time is already up. */
+
+ /* Futex call. */
+ movq %rcx, (%rsp) /* Store relative timeout. */
+ movq %rdi, 8(%rsp)
+
+# ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %esi
+ xorl PSHARED(%r12), %esi
+# else
+# if FUTEX_WAIT == 0
+ movl PSHARED(%r12), %esi
+# else
+ movl $FUTEX_WAIT, %esi
+ orl PSHARED(%r12), %esi
+# endif
+ xorl %fs:PRIVATE_FUTEX, %esi
+# endif
+ movq %rsp, %r10
+ movl %r14d, %edx
+
+ jmp 21b
+ .previous
+#endif
+
+17: /* Reget the lock. */
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if MUTEX == 0
+ cmpxchgl %esi, (%r12)
+#else
+ cmpxchgl %esi, MUTEX(%r12)
+#endif
+ jnz 12f
+
+13: decl READERS_QUEUED(%r12)
+ cmpq $-ETIMEDOUT, %rdx
+ jne 2b
+
+18: movl $ETIMEDOUT, %edx
+ jmp 9f
+
+
+5: xorl %edx, %edx
+ incl NR_READERS(%r12)
+ je 8f
+9: LOCK
+#if MUTEX == 0
+ decl (%r12)
+#else
+ decl MUTEX(%r12)
+#endif
+ jne 6f
+
+7: movq %rdx, %rax
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+ addq $16, %rsp
+ cfi_adjust_cfa_offset(-16)
+ popq %r14
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r14)
+#endif
+ popq %r13
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r13)
+ popq %r12
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r12)
+ retq
+
+#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
+ cfi_adjust_cfa_offset(16)
+ cfi_rel_offset(%r12, 8)
+ cfi_rel_offset(%r13, 0)
+#else
+ cfi_adjust_cfa_offset(40)
+ cfi_offset(%r12, -16)
+ cfi_offset(%r13, -24)
+ cfi_offset(%r14, -32)
+#endif
+1: movl PSHARED(%rdi), %esi
+#if MUTEX != 0
+ addq $MUTEX, %rdi
+#endif
+ callq __lll_lock_wait
+ jmp 2b
+
+14: cmpl %fs:TID, %eax
+ jne 3b
+ movl $EDEADLK, %edx
+ jmp 9b
+
+6: movl PSHARED(%r12), %esi
+#if MUTEX == 0
+ movq %r12, %rdi
+#else
+	leaq MUTEX(%r12), %rdi
+#endif
+ callq __lll_unlock_wake
+ jmp 7b
+
+ /* Overflow. */
+8: decl NR_READERS(%r12)
+ movl $EAGAIN, %edx
+ jmp 9b
+
+ /* Overflow. */
+4: decl READERS_QUEUED(%r12)
+ movl $EAGAIN, %edx
+ jmp 9b
+
+10: movl PSHARED(%r12), %esi
+#if MUTEX == 0
+ movq %r12, %rdi
+#else
+ leaq MUTEX(%r12), %rdi
+#endif
+ callq __lll_unlock_wake
+ jmp 11b
+
+12: movl PSHARED(%r12), %esi
+#if MUTEX == 0
+ movq %r12, %rdi
+#else
+ leaq MUTEX(%r12), %rdi
+#endif
+ callq __lll_lock_wait
+ jmp 13b
+
+16: movq $-ETIMEDOUT, %rdx
+ jmp 17b
+
+19: movl $EINVAL, %edx
+ jmp 9b
+ cfi_endproc
+ .size pthread_rwlock_timedrdlock,.-pthread_rwlock_timedrdlock
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S
new file mode 100644
index 000000000..23e1ee155
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S
@@ -0,0 +1,268 @@
+/* Copyright (C) 2002, 2003, 2005, 2007, 2009 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelrwlock.h>
+#include <pthread-errnos.h>
+#include <bits/kernel-features.h>
+#include <tcb-offsets.h>
+
+
+/* For the calculation see asm/vsyscall.h. */
+#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
+
+ .text
+
+ .globl pthread_rwlock_timedwrlock
+ .type pthread_rwlock_timedwrlock,@function
+ .align 16
+pthread_rwlock_timedwrlock:
+ cfi_startproc
+ pushq %r12
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r12, 0)
+ pushq %r13
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r13, 0)
+#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
+# define VALREG %edx
+#else
+ pushq %r14
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r14, 0)
+
+ subq $16, %rsp
+ cfi_adjust_cfa_offset(16)
+# define VALREG %r14d
+#endif
+
+ movq %rdi, %r12
+ movq %rsi, %r13
+
+ /* Get the lock. */
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if MUTEX == 0
+ cmpxchgl %esi, (%rdi)
+#else
+ cmpxchgl %esi, MUTEX(%rdi)
+#endif
+ jnz 1f
+
+2: movl WRITER(%r12), %eax
+ testl %eax, %eax
+ jne 14f
+ cmpl $0, NR_READERS(%r12)
+ je 5f
+
+ /* Check the value of the timeout parameter. */
+3: cmpq $1000000000, 8(%r13)
+ jae 19f
+
+ incl WRITERS_QUEUED(%r12)
+ je 4f
+
+ movl WRITERS_WAKEUP(%r12), VALREG
+
+ LOCK
+#if MUTEX == 0
+ decl (%r12)
+#else
+ decl MUTEX(%r12)
+#endif
+ jne 10f
+
+11:
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+# ifdef __PIC__
+ cmpl $0, __have_futex_clock_realtime(%rip)
+# else
+ cmpl $0, __have_futex_clock_realtime
+# endif
+ je .Lreltmo
+#endif
+
+ movl $FUTEX_PRIVATE_FLAG|FUTEX_WAIT_BITSET|FUTEX_CLOCK_REALTIME, %esi
+ xorl PSHARED(%r12), %esi
+ movq %r13, %r10
+ movl $0xffffffff, %r9d
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+ movl %r14d, %edx
+#endif
+21: leaq WRITERS_WAKEUP(%r12), %rdi
+ movl $SYS_futex, %eax
+ syscall
+ movq %rax, %rdx
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+ .subsection 2
+.Lreltmo:
+ /* Get current time. */
+ movq %rsp, %rdi
+ xorl %esi, %esi
+ movq $VSYSCALL_ADDR_vgettimeofday, %rax
+ callq *%rax
+
+ /* Compute relative timeout. */
+ movq 8(%rsp), %rax
+ movl $1000, %edi
+ mul %rdi /* Milli seconds to nano seconds. */
+ movq (%r13), %rcx
+ movq 8(%r13), %rdi
+ subq (%rsp), %rcx
+ subq %rax, %rdi
+ jns 15f
+ addq $1000000000, %rdi
+ decq %rcx
+15: testq %rcx, %rcx
+ js 16f /* Time is already up. */
+
+ /* Futex call. */
+ movq %rcx, (%rsp) /* Store relative timeout. */
+ movq %rdi, 8(%rsp)
+
+# ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %esi
+ xorl PSHARED(%r12), %esi
+# else
+# if FUTEX_WAIT == 0
+ movl PSHARED(%r12), %esi
+# else
+ movl $FUTEX_WAIT, %esi
+ orl PSHARED(%r12), %esi
+# endif
+ xorl %fs:PRIVATE_FUTEX, %esi
+# endif
+ movq %rsp, %r10
+ movl %r14d, %edx
+
+ jmp 21b
+ .previous
+#endif
+
+17: /* Reget the lock. */
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if MUTEX == 0
+ cmpxchgl %esi, (%r12)
+#else
+ cmpxchgl %esi, MUTEX(%r12)
+#endif
+ jnz 12f
+
+13: decl WRITERS_QUEUED(%r12)
+ cmpq $-ETIMEDOUT, %rdx
+ jne 2b
+
+18: movl $ETIMEDOUT, %edx
+ jmp 9f
+
+
+5: xorl %edx, %edx
+ movl %fs:TID, %eax
+ movl %eax, WRITER(%r12)
+9: LOCK
+#if MUTEX == 0
+ decl (%r12)
+#else
+ decl MUTEX(%r12)
+#endif
+ jne 6f
+
+7: movq %rdx, %rax
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+ addq $16, %rsp
+ cfi_adjust_cfa_offset(-16)
+ popq %r14
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r14)
+#endif
+ popq %r13
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r13)
+ popq %r12
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r12)
+ retq
+
+#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
+ cfi_adjust_cfa_offset(16)
+ cfi_rel_offset(%r12, 8)
+ cfi_rel_offset(%r13, 0)
+#else
+ cfi_adjust_cfa_offset(40)
+ cfi_offset(%r12, -16)
+ cfi_offset(%r13, -24)
+ cfi_offset(%r14, -32)
+#endif
+1: movl PSHARED(%rdi), %esi
+#if MUTEX != 0
+ addq $MUTEX, %rdi
+#endif
+ callq __lll_lock_wait
+ jmp 2b
+
+14: cmpl %fs:TID, %eax
+ jne 3b
+20: movl $EDEADLK, %edx
+ jmp 9b
+
+6: movl PSHARED(%r12), %esi
+#if MUTEX == 0
+ movq %r12, %rdi
+#else
+	leaq MUTEX(%r12), %rdi
+#endif
+ callq __lll_unlock_wake
+ jmp 7b
+
+ /* Overflow. */
+4: decl WRITERS_QUEUED(%r12)
+ movl $EAGAIN, %edx
+ jmp 9b
+
+10: movl PSHARED(%r12), %esi
+#if MUTEX == 0
+ movq %r12, %rdi
+#else
+ leaq MUTEX(%r12), %rdi
+#endif
+ callq __lll_unlock_wake
+ jmp 11b
+
+12: movl PSHARED(%r12), %esi
+#if MUTEX == 0
+ movq %r12, %rdi
+#else
+ leaq MUTEX(%r12), %rdi
+#endif
+ callq __lll_lock_wait
+ jmp 13b
+
+16: movq $-ETIMEDOUT, %rdx
+ jmp 17b
+
+19: movl $EINVAL, %edx
+ jmp 9b
+ cfi_endproc
+ .size pthread_rwlock_timedwrlock,.-pthread_rwlock_timedwrlock
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S
new file mode 100644
index 000000000..cfcc7a18c
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S
@@ -0,0 +1,130 @@
+/* Copyright (C) 2002, 2003, 2005, 2007, 2009 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelrwlock.h>
+#include <bits/kernel-features.h>
+
+
+ .text
+
+ .globl __pthread_rwlock_unlock
+ .type __pthread_rwlock_unlock,@function
+ .align 16
+ /* int __pthread_rwlock_unlock (pthread_rwlock_t *rwlock)   [%rdi = rwlock]
+    Releases either a read or a write lock: clears WRITER (or drops one
+    reader), then wakes one queued writer — or, if none, all queued
+    readers — via FUTEX_WAKE.  Always returns 0.  */
+__pthread_rwlock_unlock:
+ cfi_startproc
+ /* Get the lock. */
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if MUTEX == 0
+ cmpxchgl %esi, (%rdi)
+#else
+ cmpxchgl %esi, MUTEX(%rdi)
+#endif
+ jnz 1f
+
+ /* WRITER != 0 means a writer holds the lock; otherwise drop one
+    reader and, if readers remain, just release the internal lock.  */
+2: cmpl $0, WRITER(%rdi)
+ jne 5f
+ decl NR_READERS(%rdi)
+ jnz 6f
+
+ /* Lock fully released.  Prefer waking a single queued writer
+    (%edx = wake count, %r10 = futex address).  */
+5: movl $0, WRITER(%rdi)
+
+ movl $1, %edx
+ leaq WRITERS_WAKEUP(%rdi), %r10
+ cmpl $0, WRITERS_QUEUED(%rdi)
+ jne 0f
+
+ /* If also no readers waiting nothing to do. */
+ cmpl $0, READERS_QUEUED(%rdi)
+ je 6f
+
+ /* No writers queued: wake every queued reader instead.  */
+ movl $0x7fffffff, %edx
+ leaq READERS_WAKEUP(%rdi), %r10
+
+ /* Bump the wakeup futex word so waiters can detect the wakeup, then
+    release the internal lock before the FUTEX_WAKE syscall.  */
+0: incl (%r10)
+ LOCK
+#if MUTEX == 0
+ decl (%rdi)
+#else
+ decl MUTEX(%rdi)
+#endif
+ jne 7f
+
+8:
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_PRIVATE_FLAG|FUTEX_WAKE, %esi
+ xorl PSHARED(%rdi), %esi
+#else
+ movl $FUTEX_WAKE, %esi
+ orl PSHARED(%rdi), %esi
+ xorl %fs:PRIVATE_FUTEX, %esi
+#endif
+ movl $SYS_futex, %eax
+ movq %r10, %rdi
+ syscall
+
+ xorl %eax, %eax
+ retq
+
+ /* Nobody to wake: release the internal lock and return 0.  */
+ .align 16
+6: LOCK
+#if MUTEX == 0
+ decl (%rdi)
+#else
+ decl MUTEX(%rdi)
+#endif
+ jne 3f
+
+4: xorl %eax, %eax
+ retq
+
+ /* Slow path: internal lock was contended on acquire.  */
+1: movl PSHARED(%rdi), %esi
+#if MUTEX != 0
+ addq $MUTEX, %rdi
+#endif
+ callq __lll_lock_wait
+#if MUTEX != 0
+ subq $MUTEX, %rdi
+#endif
+ jmp 2b
+
+ /* Slow path: wake a thread blocked on the internal lock.  */
+3: movl PSHARED(%rdi), %esi
+#if MUTEX != 0
+ addq $MUTEX, %rdi
+#endif
+ callq __lll_unlock_wake
+ jmp 4b
+
+ /* NOTE(review): if MUTEX != 0 this path jumps back to 8b with %rdi
+    still biased by MUTEX, so PSHARED(%rdi) there would read the wrong
+    field — benign while MUTEX == 0.  */
+7: movl PSHARED(%rdi), %esi
+#if MUTEX != 0
+ addq $MUTEX, %rdi
+#endif
+ callq __lll_unlock_wake
+ jmp 8b
+ cfi_endproc
+ .size __pthread_rwlock_unlock,.-__pthread_rwlock_unlock
+
+ .globl pthread_rwlock_unlock
+pthread_rwlock_unlock = __pthread_rwlock_unlock
+
+ .globl __pthread_rwlock_unlock_internal
+__pthread_rwlock_unlock_internal = __pthread_rwlock_unlock
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S
new file mode 100644
index 000000000..ccfc11b46
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S
@@ -0,0 +1,167 @@
+/* Copyright (C) 2002, 2003, 2005, 2007, 2009 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelrwlock.h>
+#include <pthread-errnos.h>
+#include <bits/kernel-features.h>
+#include <tcb-offsets.h>
+
+
+ .text
+
+ .globl __pthread_rwlock_wrlock
+ .type __pthread_rwlock_wrlock,@function
+ .align 16
+ /* int __pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)  [%rdi = rwlock]
+    Acquires the write lock, blocking on the WRITERS_WAKEUP futex while
+    a writer or any readers hold the lock.  Returns 0 on success,
+    EDEADLK if the caller already owns the write lock, or EAGAIN if the
+    writers-queued counter would overflow.  */
+__pthread_rwlock_wrlock:
+ cfi_startproc
+ /* %r10 = 0: futex wait with no timeout.  */
+ xorq %r10, %r10
+
+ /* Get the lock. */
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if MUTEX == 0
+ cmpxchgl %esi, (%rdi)
+#else
+ cmpxchgl %esi, MUTEX(%rdi)
+#endif
+ jnz 1f
+
+ /* Free for writing only if no writer and no readers hold it.  */
+2: movl WRITER(%rdi), %eax
+ testl %eax, %eax
+ jne 14f
+ cmpl $0, NR_READERS(%rdi)
+ je 5f
+
+ /* Busy: queue ourselves as a waiting writer (je 4f on counter
+    overflow), snapshot the wakeup word, release the internal lock
+    and block on the futex.  */
+3: incl WRITERS_QUEUED(%rdi)
+ je 4f
+
+ movl WRITERS_WAKEUP(%rdi), %edx
+
+ LOCK
+#if MUTEX == 0
+ decl (%rdi)
+#else
+ decl MUTEX(%rdi)
+#endif
+ jne 10f
+
+11:
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %esi
+ xorl PSHARED(%rdi), %esi
+#else
+# if FUTEX_WAIT == 0
+ movl PSHARED(%rdi), %esi
+# else
+ movl $FUTEX_WAIT, %esi
+ orl PSHARED(%rdi), %esi
+# endif
+ xorl %fs:PRIVATE_FUTEX, %esi
+#endif
+ addq $WRITERS_WAKEUP, %rdi
+ movl $SYS_futex, %eax
+ syscall
+
+ subq $WRITERS_WAKEUP, %rdi
+
+ /* Reget the lock. */
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if MUTEX == 0
+ cmpxchgl %esi, (%rdi)
+#else
+ cmpxchgl %esi, MUTEX(%rdi)
+#endif
+ jnz 12f
+
+ /* Woken up: dequeue and retry the acquisition test.  */
+13: decl WRITERS_QUEUED(%rdi)
+ jmp 2b
+
+ /* Success: record our TID as the writer; %edx = 0 return value.  */
+5: xorl %edx, %edx
+ movl %fs:TID, %eax
+ movl %eax, WRITER(%rdi)
+ /* Release the internal lock and return %edx.  */
+9: LOCK
+#if MUTEX == 0
+ decl (%rdi)
+#else
+ decl MUTEX(%rdi)
+#endif
+ jne 6f
+7:
+
+ movq %rdx, %rax
+ retq
+
+ /* Slow path: internal lock was contended on acquire.  */
+1: movl PSHARED(%rdi), %esi
+#if MUTEX != 0
+ addq $MUTEX, %rdi
+#endif
+ callq __lll_lock_wait
+#if MUTEX != 0
+ subq $MUTEX, %rdi
+#endif
+ jmp 2b
+
+ /* A writer holds the lock: if it is us, report deadlock instead of
+    self-blocking forever.  */
+14: cmpl %fs:TID, %eax
+ jne 3b
+ movl $EDEADLK, %edx
+ jmp 9b
+
+ /* Slow path: wake a thread blocked on the internal lock.  */
+6: movl PSHARED(%rdi), %esi
+#if MUTEX != 0
+ addq $MUTEX, %rdi
+#endif
+ callq __lll_unlock_wake
+ jmp 7b
+
+ /* WRITERS_QUEUED wrapped to zero: undo and fail with EAGAIN.  */
+4: decl WRITERS_QUEUED(%rdi)
+ movl $EAGAIN, %edx
+ jmp 9b
+
+10: movl PSHARED(%rdi), %esi
+#if MUTEX != 0
+ addq $MUTEX, %rdi
+#endif
+ callq __lll_unlock_wake
+#if MUTEX != 0
+ subq $MUTEX, %rdi
+#endif
+ jmp 11b
+
+12: movl PSHARED(%rdi), %esi
+#if MUTEX != 0
+ addq $MUTEX, %rdi
+#endif
+ callq __lll_lock_wait
+#if MUTEX != 0
+ subq $MUTEX, %rdi
+#endif
+ jmp 13b
+ cfi_endproc
+ .size __pthread_rwlock_wrlock,.-__pthread_rwlock_wrlock
+
+ .globl pthread_rwlock_wrlock
+pthread_rwlock_wrlock = __pthread_rwlock_wrlock
+
+ .globl __pthread_rwlock_wrlock_internal
+__pthread_rwlock_wrlock_internal = __pthread_rwlock_wrlock
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_setaffinity.c b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_setaffinity.c
new file mode 100644
index 000000000..640d3044f
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_setaffinity.c
@@ -0,0 +1,14 @@
+#include <tls.h>
+
+/* x86_64 override for pthread_setaffinity: zero both vgetcpu_cache
+   words in the calling thread's TCB (addressed through %fs), so any
+   CPU/node value cached for the vgetcpu vsyscall is invalidated after
+   the affinity mask changes.  "ir"(0) is the value stored; the "i"
+   operands are the compile-time %fs offsets of the two cache words.  */
+#define RESET_VGETCPU_CACHE() \
+ do { \
+ asm volatile ("movl %0, %%fs:%P1\n\t" \
+ "movl %0, %%fs:%P2" \
+ : \
+ : "ir" (0), "i" (offsetof (struct pthread, \
+ header.vgetcpu_cache[0])), \
+ "i" (offsetof (struct pthread, \
+ header.vgetcpu_cache[1]))); \
+ } while (0)
+
+/* Generic implementation; NOTE(review): assumed to invoke
+   RESET_VGETCPU_CACHE after a successful affinity change — the generic
+   file is not visible here, confirm against it.  */
+#include "../pthread_setaffinity.c"
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_spin_init.c b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_spin_init.c
new file mode 100644
index 000000000..483de8cac
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_spin_init.c
@@ -0,0 +1 @@
+#include <sysdeps/x86_64/pthread_spin_init.c>
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_spin_unlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_spin_unlock.S
new file mode 100644
index 000000000..e8e2ba262
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_spin_unlock.S
@@ -0,0 +1 @@
+#include <sysdeps/x86_64/pthread_spin_unlock.S>
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S
new file mode 100644
index 000000000..7af6524fe
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S
@@ -0,0 +1,89 @@
+/* Copyright (C) 2002, 2003, 2005, 2007, 2008 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <pthread-errnos.h>
+#include <structsem.h>
+
+
+ .text
+
+ .globl sem_post
+ .type sem_post,@function
+ .align 16
+ /* int sem_post (sem_t *sem)   [%rdi = sem]
+    Atomically increments the semaphore value (CAS retry loop) and, if
+    NWAITERS is non-zero, wakes one waiter with FUTEX_WAKE.  Returns 0
+    on success; -1 with errno = EOVERFLOW if the value is already
+    SEM_VALUE_MAX, or errno = EINVAL if the futex syscall fails.  */
+sem_post:
+#if VALUE == 0
+ movl (%rdi), %eax
+#else
+ movl VALUE(%rdi), %eax
+#endif
+ /* CAS loop: %eax = old value, %esi = old + 1.  */
+0: cmpl $SEM_VALUE_MAX, %eax
+ je 3f
+ leal 1(%rax), %esi
+ LOCK
+#if VALUE == 0
+ cmpxchgl %esi, (%rdi)
+#else
+ cmpxchgl %esi, VALUE(%rdi)
+#endif
+ jnz 0b
+
+ /* Only enter the kernel if somebody is blocked on the semaphore.  */
+ cmpq $0, NWAITERS(%rdi)
+ je 2f
+
+ movl $SYS_futex, %eax
+ movl $FUTEX_WAKE, %esi
+ orl PRIVATE(%rdi), %esi
+ movl $1, %edx
+ syscall
+
+ testq %rax, %rax
+ js 1f
+
+2: xorl %eax, %eax
+ retq
+
+ /* Futex syscall failed: errno = EINVAL.  */
+1:
+#if USE___THREAD
+ movl $EINVAL, %eax
+#else
+ callq __errno_location@plt
+ movl $EINVAL, %edx
+#endif
+ jmp 4f
+
+ /* Value saturated: errno = EOVERFLOW.  */
+3:
+#if USE___THREAD
+ movl $EOVERFLOW, %eax
+#else
+ callq __errno_location@plt
+ movl $EOVERFLOW, %edx
+#endif
+
+ /* Store the error code into errno (TLS or via __errno_location)
+    and return -1.  */
+4:
+#if USE___THREAD
+ movq errno@gottpoff(%rip), %rdx
+ movl %eax, %fs:(%rdx)
+#else
+ movl %edx, (%rax)
+#endif
+ orl $-1, %eax
+ retq
+ .size sem_post,.-sem_post
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S
new file mode 100644
index 000000000..704a2223a
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S
@@ -0,0 +1,379 @@
+/* Copyright (C) 2002, 2003, 2005, 2007, 2009 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <bits/kernel-features.h>
+#include <lowlevellock.h>
+#include <pthread-errnos.h>
+#include <structsem.h>
+
+
+/* For the calculation see asm/vsyscall.h. */
+#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
+
+ .text
+
+ .globl sem_timedwait
+ .type sem_timedwait,@function
+ .align 16
+ /* int sem_timedwait (sem_t *sem, const struct timespec *abstime)
+    [%rdi = sem, %rsi = abstime]
+    Fast path: CAS-decrement a positive value and return 0.  Otherwise
+    block with FUTEX_WAIT_BITSET|FUTEX_CLOCK_REALTIME (absolute
+    timeout) when the kernel supports it, else fall back to .Lreltmo,
+    which converts to a relative timeout via the gettimeofday vsyscall.
+    Returns 0, or -1 with errno = EINVAL (bad nanoseconds), ETIMEDOUT,
+    or the negated futex error.  This is a cancellation point; the
+    .LcleanupSTART/.LcleanupEND regions are covered by unwind cleanup
+    handlers that undo the NWAITERS increment.  */
+sem_timedwait:
+.LSTARTCODE:
+ cfi_startproc
+#if VALUE == 0
+ movl (%rdi), %eax
+#else
+ movl VALUE(%rdi), %eax
+#endif
+ /* Fast path: try to grab a token without entering the kernel.  */
+2: testl %eax, %eax
+ je 1f
+
+ leaq -1(%rax), %rdx
+ LOCK
+#if VALUE == 0
+ cmpxchgl %edx, (%rdi)
+#else
+ cmpxchgl %edx, VALUE(%rdi)
+#endif
+ jne 2b
+
+ xorl %eax, %eax
+ retq
+
+ /* Check whether the timeout value is valid. */
+1: cmpq $1000000000, 8(%rsi)
+ jae 6f
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+# ifdef __PIC__
+ cmpl $0, __have_futex_clock_realtime(%rip)
+# else
+ cmpl $0, __have_futex_clock_realtime
+# endif
+ je .Lreltmo
+#endif
+
+ /* This push is only needed to store the sem_t pointer for the
+ exception handler. */
+ pushq %rdi
+ cfi_adjust_cfa_offset(8)
+
+ /* %r10 = abstime for the futex syscall.  */
+ movq %rsi, %r10
+
+ LOCK
+ addq $1, NWAITERS(%rdi)
+
+.LcleanupSTART:
+13: call __pthread_enable_asynccancel
+ movl %eax, %r8d
+
+#if VALUE != 0
+ leaq VALUE(%rdi), %rdi
+#endif
+ /* Bitset 0xffffffff = match any waker; absolute CLOCK_REALTIME
+    timeout in %r10; expected futex value 0 in %edx.  */
+ movl $0xffffffff, %r9d
+ movl $FUTEX_WAIT_BITSET|FUTEX_CLOCK_REALTIME, %esi
+ /* NOTE(review): if VALUE != 0, %rdi was just biased by VALUE above,
+    so PRIVATE(%rdi) here would read the wrong offset — benign while
+    VALUE == 0.  */
+ orl PRIVATE(%rdi), %esi
+ movl $SYS_futex, %eax
+ xorl %edx, %edx
+ syscall
+ movq %rax, %r9
+#if VALUE != 0
+ leaq -VALUE(%rdi), %rdi
+#endif
+
+ /* Preserve sem pointer in %r8 across the cancel-state call.  */
+ xchgq %r8, %rdi
+ call __pthread_disable_asynccancel
+.LcleanupEND:
+ movq %r8, %rdi
+
+ /* 0 or -EWOULDBLOCK (value changed under us): retry the grab;
+    anything else is a real error.  */
+ testq %r9, %r9
+ je 11f
+ cmpq $-EWOULDBLOCK, %r9
+ jne 3f
+
+11:
+#if VALUE == 0
+ movl (%rdi), %eax
+#else
+ movl VALUE(%rdi), %eax
+#endif
+14: testl %eax, %eax
+ je 13b
+
+ leaq -1(%rax), %rcx
+ LOCK
+#if VALUE == 0
+ cmpxchgl %ecx, (%rdi)
+#else
+ cmpxchgl %ecx, VALUE(%rdi)
+#endif
+ jne 14b
+
+ xorl %eax, %eax
+
+ /* Common exit: drop our waiter count and pop the saved pointer.  */
+15: LOCK
+ subq $1, NWAITERS(%rdi)
+
+ leaq 8(%rsp), %rsp
+ cfi_adjust_cfa_offset(-8)
+ retq
+
+ /* Error exit: errno = -(futex result), return -1.  */
+ cfi_adjust_cfa_offset(8)
+3: negq %r9
+#if USE___THREAD
+ movq errno@gottpoff(%rip), %rdx
+ movl %r9d, %fs:(%rdx)
+#else
+ callq __errno_location@plt
+ movl %r9d, (%rax)
+#endif
+
+ orl $-1, %eax
+ jmp 15b
+
+ /* Invalid timespec (tv_nsec >= 1e9): errno = EINVAL, return -1.  */
+ cfi_adjust_cfa_offset(-8)
+6:
+#if USE___THREAD
+ movq errno@gottpoff(%rip), %rdx
+ movl $EINVAL, %fs:(%rdx)
+#else
+ callq __errno_location@plt
+ movl $EINVAL, (%rax)
+#endif
+
+ orl $-1, %eax
+
+ retq
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+ /* Fallback for kernels without FUTEX_CLOCK_REALTIME: compute a
+    relative timeout from gettimeofday each iteration and use plain
+    FUTEX_WAIT.  %r12 = sem, %r13 = abstime, %r14 = futex result.  */
+.Lreltmo:
+ pushq %r12
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r12, 0)
+ pushq %r13
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r13, 0)
+ pushq %r14
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r14, 0)
+
+/* NOTE(review): this whole region is inside #ifndef
+   __ASSUME_FUTEX_CLOCK_REALTIME, so the #ifdef branch below is never
+   taken and STACKFRAME is always 24 (timeval/timespec scratch plus the
+   saved cancel state).  */
+#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
+# define STACKFRAME 8
+#else
+# define STACKFRAME 24
+#endif
+ subq $STACKFRAME, %rsp
+ cfi_adjust_cfa_offset(STACKFRAME)
+
+ movq %rdi, %r12
+ movq %rsi, %r13
+
+ LOCK
+ addq $1, NWAITERS(%r12)
+
+ /* Current time into (%rsp) via the gettimeofday vsyscall.  */
+7: xorl %esi, %esi
+ movq %rsp, %rdi
+ movq $VSYSCALL_ADDR_vgettimeofday, %rax
+ callq *%rax
+
+ /* Compute relative timeout. */
+ movq 8(%rsp), %rax
+ movl $1000, %edi
+ mul %rdi /* Milli seconds to nano seconds. */
+ movq (%r13), %rdi
+ movq 8(%r13), %rsi
+ subq (%rsp), %rdi
+ subq %rax, %rsi
+ jns 5f
+ addq $1000000000, %rsi
+ decq %rdi
+5: testq %rdi, %rdi
+ movl $ETIMEDOUT, %r14d
+ js 36f /* Time is already up. */
+
+ movq %rdi, (%rsp) /* Store relative timeout. */
+ movq %rsi, 8(%rsp)
+
+.LcleanupSTART2:
+ call __pthread_enable_asynccancel
+ movl %eax, 16(%rsp)
+
+ /* FUTEX_WAIT with the relative timeout at (%rsp).  */
+ movq %rsp, %r10
+# if VALUE == 0
+ movq %r12, %rdi
+# else
+ leaq VALUE(%r12), %rdi
+# endif
+# if FUTEX_WAIT == 0
+ movl PRIVATE(%rdi), %esi
+# else
+ movl $FUTEX_WAIT, %esi
+ orl PRIVATE(%rdi), %esi
+# endif
+ movl $SYS_futex, %eax
+ xorl %edx, %edx
+ syscall
+ movq %rax, %r14
+
+ movl 16(%rsp), %edi
+ call __pthread_disable_asynccancel
+.LcleanupEND2:
+
+ testq %r14, %r14
+ je 9f
+ cmpq $-EWOULDBLOCK, %r14
+ jne 33f
+
+9:
+# if VALUE == 0
+ movl (%r12), %eax
+# else
+ movl VALUE(%r12), %eax
+# endif
+8: testl %eax, %eax
+ je 7b
+
+ leaq -1(%rax), %rcx
+ LOCK
+# if VALUE == 0
+ cmpxchgl %ecx, (%r12)
+# else
+ cmpxchgl %ecx, VALUE(%r12)
+# endif
+ jne 8b
+
+ xorl %eax, %eax
+
+ /* Common exit for the fallback: drop waiter count, unwind frame.  */
+45: LOCK
+ subq $1, NWAITERS(%r12)
+
+ addq $STACKFRAME, %rsp
+ cfi_adjust_cfa_offset(-STACKFRAME)
+ popq %r14
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r14)
+ popq %r13
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r13)
+ popq %r12
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r12)
+ retq
+
+ /* Error exits: 33 negates the futex error, 36 already has
+    ETIMEDOUT in %r14d.  */
+ cfi_adjust_cfa_offset(STACKFRAME + 3 * 8)
+ cfi_rel_offset(%r12, STACKFRAME + 2 * 8)
+ cfi_rel_offset(%r13, STACKFRAME + 1 * 8)
+ cfi_rel_offset(%r14, STACKFRAME)
+33: negq %r14
+36:
+#if USE___THREAD
+ movq errno@gottpoff(%rip), %rdx
+ movl %r14d, %fs:(%rdx)
+#else
+ callq __errno_location@plt
+ movl %r14d, (%rax)
+#endif
+
+ orl $-1, %eax
+ jmp 45b
+#endif
+ cfi_endproc
+ .size sem_timedwait,.-sem_timedwait
+
+
+ /* Unwind cleanup handler for the absolute-timeout path: invoked if
+    the thread is cancelled inside .LcleanupSTART/.LcleanupEND.  The
+    sem_t pointer was pushed at (%rsp); undo the NWAITERS increment and
+    resume unwinding with the exception object in %rdi.  */
+ .type sem_timedwait_cleanup,@function
+sem_timedwait_cleanup:
+ cfi_startproc
+ cfi_adjust_cfa_offset(8)
+
+ movq (%rsp), %rdi
+ LOCK
+ subq $1, NWAITERS(%rdi)
+ movq %rax, %rdi
+.LcallUR:
+ call _Unwind_Resume@PLT
+ hlt
+.LENDCODE:
+ cfi_endproc
+ .size sem_timedwait_cleanup,.-sem_timedwait_cleanup
+
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+ /* Cleanup handler for the relative-timeout fallback (.Lreltmo): the
+    sem_t pointer is still in %r12; undo NWAITERS, reload the saved
+    callee-saved registers from the stack frame, resume unwinding.  */
+ .type sem_timedwait_cleanup2,@function
+sem_timedwait_cleanup2:
+ cfi_startproc
+ cfi_adjust_cfa_offset(STACKFRAME + 3 * 8)
+ cfi_rel_offset(%r12, STACKFRAME + 2 * 8)
+ cfi_rel_offset(%r13, STACKFRAME + 1 * 8)
+ cfi_rel_offset(%r14, STACKFRAME)
+
+ LOCK
+ subq $1, NWAITERS(%r12)
+ movq %rax, %rdi
+ movq STACKFRAME(%rsp), %r14
+ movq STACKFRAME+8(%rsp), %r13
+ movq STACKFRAME+16(%rsp), %r12
+.LcallUR2:
+ call _Unwind_Resume@PLT
+ hlt
+.LENDCODE2:
+ cfi_endproc
+ .size sem_timedwait_cleanup2,.-sem_timedwait_cleanup2
+#endif
+
+
+ /* LSDA (language-specific data area): maps each cancellable code
+    region of sem_timedwait to its cleanup handler, and the
+    _Unwind_Resume call sites to no handler, for the unwinder.  */
+ .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+ .byte DW_EH_PE_omit # @LPStart format
+ .byte DW_EH_PE_omit # @TType format
+ .byte DW_EH_PE_uleb128 # call-site format
+ .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+ .uleb128 .LcleanupSTART-.LSTARTCODE
+ .uleb128 .LcleanupEND-.LcleanupSTART
+ .uleb128 sem_timedwait_cleanup-.LSTARTCODE
+ .uleb128 0
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+ .uleb128 .LcleanupSTART2-.LSTARTCODE
+ .uleb128 .LcleanupEND2-.LcleanupSTART2
+ .uleb128 sem_timedwait_cleanup2-.LSTARTCODE
+ .uleb128 0
+#endif
+ .uleb128 .LcallUR-.LSTARTCODE
+ .uleb128 .LENDCODE-.LcallUR
+ .uleb128 0
+ .uleb128 0
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+ .uleb128 .LcallUR2-.LSTARTCODE
+ .uleb128 .LENDCODE2-.LcallUR2
+ .uleb128 0
+ .uleb128 0
+#endif
+.Lcstend:
+
+
+#ifdef SHARED
+ /* Weak, link-once reference to the C personality routine used by
+    the unwinder for the regions above.  */
+ .hidden DW.ref.__gcc_personality_v0
+ .weak DW.ref.__gcc_personality_v0
+ .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+ .align 8
+ .type DW.ref.__gcc_personality_v0, @object
+ .size DW.ref.__gcc_personality_v0, 8
+DW.ref.__gcc_personality_v0:
+ .quad __gcc_personality_v0
+#endif
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S
new file mode 100644
index 000000000..7b7f63ddb
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S
@@ -0,0 +1,52 @@
+/* Copyright (C) 2002, 2003, 2005, 2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <pthread-errnos.h>
+
+ .text
+
+ .globl sem_trywait
+ .type sem_trywait,@function
+ .align 16
+ /* int sem_trywait (sem_t *sem)   [%rdi = sem]
+    Non-blocking: CAS-decrement the value if it is positive and return
+    0; otherwise return -1 with errno = EAGAIN.  Never enters the
+    kernel.  */
+sem_trywait:
+ movl (%rdi), %eax
+ /* CAS retry loop: %eax = observed value, %edx = value - 1.  */
+2: testl %eax, %eax
+ jz 1f
+
+ leal -1(%rax), %edx
+ LOCK
+ cmpxchgl %edx, (%rdi)
+ jne 2b
+
+ xorl %eax, %eax
+ retq
+
+ /* Value was zero: fail immediately with EAGAIN.  */
+1:
+#if USE___THREAD
+ movq errno@gottpoff(%rip), %rdx
+ movl $EAGAIN, %fs:(%rdx)
+#else
+ callq __errno_location@plt
+ movl $EAGAIN, (%rax)
+#endif
+ orl $-1, %eax
+ retq
+ .size sem_trywait,.-sem_trywait
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S
new file mode 100644
index 000000000..f6b39bd74
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S
@@ -0,0 +1,174 @@
+/* Copyright (C) 2002, 2003, 2005, 2007, 2009 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <pthread-errnos.h>
+#include <structsem.h>
+
+
+ .text
+
+ .globl sem_wait
+ .type sem_wait,@function
+ .align 16
+ /* int sem_wait (sem_t *sem)   [%rdi = sem]
+    Fast path: CAS-decrement a positive value and return 0.  Otherwise
+    increment NWAITERS and block in FUTEX_WAIT (no timeout) until the
+    value can be grabbed.  Cancellation point; the cleanup handler
+    below undoes the NWAITERS increment on cancellation.  Returns 0,
+    or -1 with errno = the negated futex error.  */
+sem_wait:
+.LSTARTCODE:
+ cfi_startproc
+
+#if VALUE == 0
+ movl (%rdi), %eax
+#else
+ movl VALUE(%rdi), %eax
+#endif
+ /* Fast path: try to take a token without entering the kernel.  */
+2: testl %eax, %eax
+ je 1f
+
+ leal -1(%rax), %edx
+ LOCK
+#if VALUE == 0
+ cmpxchgl %edx, (%rdi)
+#else
+ cmpxchgl %edx, VALUE(%rdi)
+#endif
+ jne 2b
+
+ xorl %eax, %eax
+ retq
+
+ /* This push is only needed to store the sem_t pointer for the
+ exception handler. */
+1: pushq %rdi
+ cfi_adjust_cfa_offset(8)
+
+ LOCK
+ addq $1, NWAITERS(%rdi)
+
+.LcleanupSTART:
+6: call __pthread_enable_asynccancel
+ movl %eax, %r8d
+
+ /* FUTEX_WAIT, no timeout (%r10 = 0), expected value 0 (%edx).  */
+ xorq %r10, %r10
+ movl $SYS_futex, %eax
+#if FUTEX_WAIT == 0
+ movl PRIVATE(%rdi), %esi
+#else
+ movl $FUTEX_WAIT, %esi
+ orl PRIVATE(%rdi), %esi
+#endif
+ xorl %edx, %edx
+ syscall
+ movq %rax, %rcx
+
+ /* Preserve sem pointer in %r8 across the cancel-state call.  */
+ xchgq %r8, %rdi
+ call __pthread_disable_asynccancel
+.LcleanupEND:
+ movq %r8, %rdi
+
+ /* 0 or -EWOULDBLOCK (value already changed): retry the grab;
+    anything else is a real error.  */
+ testq %rcx, %rcx
+ je 3f
+ cmpq $-EWOULDBLOCK, %rcx
+ jne 4f
+
+3:
+#if VALUE == 0
+ movl (%rdi), %eax
+#else
+ movl VALUE(%rdi), %eax
+#endif
+5: testl %eax, %eax
+ je 6b
+
+ leal -1(%rax), %edx
+ LOCK
+#if VALUE == 0
+ cmpxchgl %edx, (%rdi)
+#else
+ cmpxchgl %edx, VALUE(%rdi)
+#endif
+ jne 5b
+
+ xorl %eax, %eax
+
+ /* Common exit: drop our waiter count and pop the saved pointer.  */
+9: LOCK
+ subq $1, NWAITERS(%rdi)
+
+ leaq 8(%rsp), %rsp
+ cfi_adjust_cfa_offset(-8)
+
+ retq
+
+ /* Error exit: errno = -(futex result), return -1.  The TLS errno
+    path is required here because a call to __errno_location would
+    clobber %rcx/%rdi (see the #error guard).  */
+ cfi_adjust_cfa_offset(8)
+4: negq %rcx
+#if USE___THREAD
+ movq errno@gottpoff(%rip), %rdx
+ movl %ecx, %fs:(%rdx)
+#else
+# error "not supported. %rcx and %rdi must be preserved"
+ callq __errno_location@plt
+ movl %ecx, (%rax)
+#endif
+ orl $-1, %eax
+
+ jmp 9b
+ .size sem_wait,.-sem_wait
+
+
+ /* Unwind cleanup handler: invoked if the thread is cancelled inside
+    .LcleanupSTART/.LcleanupEND.  The sem_t pointer was pushed at
+    (%rsp); undo the NWAITERS increment and resume unwinding with the
+    exception object in %rdi.  */
+ .type sem_wait_cleanup,@function
+sem_wait_cleanup:
+ movq (%rsp), %rdi
+ LOCK
+ subq $1, NWAITERS(%rdi)
+ movq %rax, %rdi
+.LcallUR:
+ call _Unwind_Resume@PLT
+ hlt
+.LENDCODE:
+ cfi_endproc
+ .size sem_wait_cleanup,.-sem_wait_cleanup
+
+
+ /* LSDA: maps sem_wait's cancellable region to sem_wait_cleanup and
+    the _Unwind_Resume call site to no handler.  */
+ .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+ .byte DW_EH_PE_omit # @LPStart format
+ .byte DW_EH_PE_omit # @TType format
+ .byte DW_EH_PE_uleb128 # call-site format
+ .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+ .uleb128 .LcleanupSTART-.LSTARTCODE
+ .uleb128 .LcleanupEND-.LcleanupSTART
+ .uleb128 sem_wait_cleanup-.LSTARTCODE
+ .uleb128 0
+ .uleb128 .LcallUR-.LSTARTCODE
+ .uleb128 .LENDCODE-.LcallUR
+ .uleb128 0
+ .uleb128 0
+.Lcstend:
+
+
+#ifdef SHARED
+ /* Weak, link-once reference to the C personality routine.  */
+ .hidden DW.ref.__gcc_personality_v0
+ .weak DW.ref.__gcc_personality_v0
+ .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+ .align 8
+ .type DW.ref.__gcc_personality_v0, @object
+ .size DW.ref.__gcc_personality_v0, 8
+DW.ref.__gcc_personality_v0:
+ .quad __gcc_personality_v0
+#endif
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sysdep-cancel.h b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sysdep-cancel.h
new file mode 100644
index 000000000..1c93952d4
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sysdep-cancel.h
@@ -0,0 +1,111 @@
+/* Copyright (C) 2002-2006, 2009 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Jakub Jelinek <jakub@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <tls.h>
+#ifndef __ASSEMBLER__
+# include <pthreadP.h>
+#endif
+
+#if !defined NOT_IN_libc || defined IS_IN_libpthread || defined IS_IN_librt
+
+/* The code to disable cancellation depends on the fact that the called
+ functions are special. They don't modify registers other than %rax
+ and %r11 if they return. Therefore we don't have to preserve other
+ registers around these calls. */
+/* PSEUDO emits a cancellable syscall stub: single-threaded processes
+   take the direct __NAME_nocancel path; multi-threaded ones bracket
+   the syscall with CENABLE/CDISABLE (async-cancel on/off), keeping the
+   stack 16-byte aligned across the helper calls.  */
+# undef PSEUDO
+# define PSEUDO(name, syscall_name, args) \
+ .text; \
+ ENTRY (name) \
+ SINGLE_THREAD_P; \
+ jne L(pseudo_cancel); \
+ .type __##syscall_name##_nocancel,@function; \
+ .globl __##syscall_name##_nocancel; \
+ __##syscall_name##_nocancel: \
+ DO_CALL (syscall_name, args); \
+ cmpq $-4095, %rax; \
+ jae SYSCALL_ERROR_LABEL; \
+ ret; \
+ .size __##syscall_name##_nocancel,.-__##syscall_name##_nocancel; \
+ L(pseudo_cancel): \
+ /* We always have to align the stack before calling a function. */ \
+ subq $8, %rsp; cfi_adjust_cfa_offset (8); \
+ CENABLE \
+ /* The return value from CENABLE is argument for CDISABLE. */ \
+ movq %rax, (%rsp); \
+ DO_CALL (syscall_name, args); \
+ movq (%rsp), %rdi; \
+ /* Save %rax since it's the error code from the syscall. */ \
+ movq %rax, %rdx; \
+ CDISABLE \
+ movq %rdx, %rax; \
+ addq $8,%rsp; cfi_adjust_cfa_offset (-8); \
+ cmpq $-4095, %rax; \
+ jae SYSCALL_ERROR_LABEL; \
+ L(pseudo_end):
+
+
+/* Per-library cancellation helpers and multiple-threads flag.  */
+# ifdef IS_IN_libpthread
+# define CENABLE call __pthread_enable_asynccancel;
+# define CDISABLE call __pthread_disable_asynccancel;
+# define __local_multiple_threads __pthread_multiple_threads
+# elif !defined NOT_IN_libc
+# define CENABLE call __libc_enable_asynccancel;
+# define CDISABLE call __libc_disable_asynccancel;
+# define __local_multiple_threads __libc_multiple_threads
+# elif defined IS_IN_librt
+# define CENABLE call __librt_enable_asynccancel;
+# define CDISABLE call __librt_disable_asynccancel;
+# else
+# error Unsupported library
+# endif
+
+/* SINGLE_THREAD_P: libc/libpthread test a global counter; other
+   libraries (librt) test the per-thread flag in the TCB via %fs.
+   The assembler variants set flags for the `jne' in PSEUDO.  */
+# if defined IS_IN_libpthread || !defined NOT_IN_libc
+# ifndef __ASSEMBLER__
+extern int __local_multiple_threads attribute_hidden;
+# define SINGLE_THREAD_P \
+ __builtin_expect (__local_multiple_threads == 0, 1)
+# else
+# define SINGLE_THREAD_P cmpl $0, __local_multiple_threads(%rip)
+# endif
+
+# else
+
+# ifndef __ASSEMBLER__
+# define SINGLE_THREAD_P \
+ __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+ header.multiple_threads) == 0, 1)
+# else
+# define SINGLE_THREAD_P cmpl $0, %fs:MULTIPLE_THREADS_OFFSET
+# endif
+
+# endif
+
+#elif !defined __ASSEMBLER__
+
+/* No cancellation machinery in this context.  */
+# define SINGLE_THREAD_P (1)
+# define NO_CANCELLATION 1
+
+#endif
+
+#ifndef __ASSEMBLER__
+# define RTLD_SINGLE_THREAD_P \
+ __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+ header.multiple_threads) == 0, 1)
+#endif
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/vfork.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/vfork.S
new file mode 100644
index 000000000..9a9912ca8
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/vfork.S
@@ -0,0 +1,43 @@
+/* Copyright (C) 2004 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+/* We want an #include_next, but we are the main source file.
+ So, #include ourselves and in that incarnation we can use #include_next. */
+#ifndef INCLUDED_SELF
+# define INCLUDED_SELF
+# include <vfork.S>
+#else
+
+# include <tcb-offsets.h>
+
+/* Save the TCB-cached PID in %esi and store its negation (or the
+   0x80000000 sentinel when the PID is 0, via cmove) in the TCB around
+   the vfork.  NOTE(review): presumably this marks the PID cache
+   invalid for the child sharing our address space — confirm against
+   the generic vfork/getpid code.  */
+# define SAVE_PID \
+ movl %fs:PID, %esi; \
+ movl $0x80000000, %ecx; \
+ movl %esi, %edx; \
+ negl %edx; \
+ cmove %ecx, %edx; \
+ movl %edx, %fs:PID
+
+/* In the parent (or on error, %rax != 0) restore the saved PID; the
+   child (%rax == 0) skips the restore.  */
+# define RESTORE_PID \
+ testq %rax, %rax; \
+ je 1f; \
+ movl %esi, %fs:PID; \
+1:
+
+# include_next <vfork.S>
+#endif