Diffstat (limited to 'libpthread/nptl/sysdeps/unix/sysv/linux/x86_64')
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Makefile | 13
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Makefile.arch | 73
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Versions | 7
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/pthreadtypes.h | 225
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/semaphore.h | 44
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/cancellation.S | 116
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/clone.S | 9
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/compat-timer.h | 46
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/fork.c | 31
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S | 22
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S | 20
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S | 22
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S | 282
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h | 342
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S | 306
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/not-cancel.h | 1
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pt-vfork.S | 31
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S | 159
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S | 136
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S | 101
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S | 468
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S | 420
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S | 259
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S | 177
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S | 220
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S | 211
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S | 129
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S | 165
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_setaffinity.c | 14
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_spin_init.c | 1
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_spin_unlock.S | 1
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S | 64
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S | 174
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S | 57
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S | 119
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sysdep-cancel.h | 111
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sysdep.h | 259
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/vfork.S | 43
38 files changed, 4878 insertions(+), 0 deletions(-)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Makefile b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Makefile
new file mode 100644
index 000000000..43a6fad84
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Makefile
@@ -0,0 +1,13 @@
+# Makefile for uClibc NPTL
+#
+# Copyright (C) 2005 Steven J. Hill <sjhill@uclibc.org>
+#
+# Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
+#
+
+top_srcdir=../../../../../../../
+top_builddir=../../../../../../../
+all: objs
+include $(top_builddir)Rules.mak
+include Makefile.arch
+include $(top_srcdir)Makerules
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Makefile.arch b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Makefile.arch
new file mode 100644
index 000000000..f059e2db2
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Makefile.arch
@@ -0,0 +1,73 @@
+# Makefile for uClibc NPTL
+#
+# Copyright (C) 2006 Steven J. Hill <sjhill@uclibc.org>
+#
+# Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
+#
+LINUX_ARCH_DIR:=$(top_srcdir)libpthread/nptl/sysdeps/unix/sysv/linux/x86_64
+LINUX_ARCH_OUT:=$(top_builddir)libpthread/nptl/sysdeps/unix/sysv/linux/x86_64
+
+
+libpthread_SSRC = pt-vfork.S clone.S pthread_once.S
+libpthread_CSRC = pthread_spin_init.c
+
+libc_a_CSRC = fork.c
+libc_a_SSRC = clone.S vfork.S
+
+libpthread_SSRC += lowlevellock.S pthread_barrier_wait.S pthread_cond_signal.S pthread_cond_broadcast.S \
+ sem_post.S sem_timedwait.S \
+ sem_trywait.S sem_wait.S pthread_rwlock_rdlock.S pthread_rwlock_wrlock.S \
+ pthread_rwlock_timedrdlock.S pthread_rwlock_timedwrlock.S pthread_rwlock_unlock.S \
+ pthread_cond_timedwait.S pthread_cond_wait.S pthread_spin_unlock.S
+
+libc_a_SSRC += libc-lowlevellock.S
+
+
+CFLAGS-OMIT-fork.c = -DNOT_IN_libc=1 -DIS_IN_libpthread=1
+
+ifeq ($(UCLIBC_HAS_STDIO_FUTEXES),y)
+CFLAGS-fork.c = -D__USE_STDIO_FUTEXES__
+endif
+
+ASFLAGS-pt-vfork.S = -DNOT_IN_libc=1 -DIS_IN_libpthread=1 -D_LIBC_REENTRANT -DUSE___THREAD
+ASFLAGS-lowlevellock.S = -DNOT_IN_libc=1 -DIS_IN_libpthread=1 -D_LIBC_REENTRANT -DUSE___THREAD
+ASFLAGS-pthread_once.S = -DNOT_IN_libc=1 -DIS_IN_libpthread=1 -D_LIBC_REENTRANT -DUSE___THREAD
+
+
+ASFLAGS-clone.S = -D_LIBC_REENTRANT
+ASFLAGS-vfork.S = -D_LIBC_REENTRANT
+ASFLAGS-libc-lowlevellock.S = -D_LIBC_REENTRANT
+
+ifeq ($(UCLIBC_HAS_THREADS_NATIVE),y)
+#Needed to use the correct SYSCALL_ERROR_HANDLER
+ASFLAGS-clone.S += -DUSE___THREAD
+ASFLAGS-vfork.S += -DUSE___THREAD
+endif
+
+CFLAGS += $(SSP_ALL_CFLAGS)
+#CFLAGS:=$(CFLAGS:-O1=-O2)
+
+LINUX_ARCH_OBJ:=$(patsubst %.S,$(LINUX_ARCH_OUT)/%.o,$(libpthread_SSRC))
+LINUX_ARCH_OBJ+=$(patsubst %.c,$(LINUX_ARCH_OUT)/%.o,$(libpthread_CSRC))
+
+ifeq ($(DOPIC),y)
+libpthread-a-y += $(LINUX_ARCH_OBJ:.o=.os)
+else
+libpthread-a-y += $(LINUX_ARCH_OBJ)
+endif
+libpthread-so-y += $(LINUX_ARCH_OBJ:.o=.oS)
+
+libpthread-nomulti-y+=$(LINUX_ARCH_OBJS)
+
+LIBC_LINUX_ARCH_OBJ:=$(patsubst %.c,$(LINUX_ARCH_OUT)/%.o,$(libc_a_CSRC))
+LIBC_LINUX_ARCH_OBJ+=$(patsubst %.S,$(LINUX_ARCH_OUT)/%.o,$(libc_a_SSRC))
+
+libc-static-y+=$(LIBC_LINUX_ARCH_OBJ)
+libc-shared-y+=$(LIBC_LINUX_ARCH_OBJ:.o=.oS)
+
+libc-nomulti-y+=$(LIBC_LINUX_ARCH_OBJ)
+
+objclean-y+=nptl_linux_arch_clean
+
+nptl_linux_arch_clean:
+ $(do_rm) $(addprefix $(LINUX_ARCH_OUT)/*., o os oS)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Versions b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Versions
new file mode 100644
index 000000000..3b111ddb5
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Versions
@@ -0,0 +1,7 @@
+librt {
+ GLIBC_2.3.3 {
+ # Changed timer_t.
+ timer_create; timer_delete; timer_getoverrun; timer_gettime;
+ timer_settime;
+ }
+}
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/pthreadtypes.h b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/pthreadtypes.h
new file mode 100644
index 000000000..7a09c8119
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/pthreadtypes.h
@@ -0,0 +1,225 @@
+/* Copyright (C) 2002,2003,2004,2005,2006,2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#ifndef _BITS_PTHREADTYPES_H
+#define _BITS_PTHREADTYPES_H 1
+
+#include <bits/wordsize.h>
+
+#if __WORDSIZE == 64
+# define __SIZEOF_PTHREAD_ATTR_T 56
+# define __SIZEOF_PTHREAD_MUTEX_T 40
+# define __SIZEOF_PTHREAD_MUTEXATTR_T 4
+# define __SIZEOF_PTHREAD_COND_T 48
+# define __SIZEOF_PTHREAD_CONDATTR_T 4
+# define __SIZEOF_PTHREAD_RWLOCK_T 56
+# define __SIZEOF_PTHREAD_RWLOCKATTR_T 8
+# define __SIZEOF_PTHREAD_BARRIER_T 32
+# define __SIZEOF_PTHREAD_BARRIERATTR_T 4
+#else
+# define __SIZEOF_PTHREAD_ATTR_T 36
+# define __SIZEOF_PTHREAD_MUTEX_T 24
+# define __SIZEOF_PTHREAD_MUTEXATTR_T 4
+# define __SIZEOF_PTHREAD_COND_T 48
+# define __SIZEOF_PTHREAD_CONDATTR_T 4
+# define __SIZEOF_PTHREAD_RWLOCK_T 32
+# define __SIZEOF_PTHREAD_RWLOCKATTR_T 8
+# define __SIZEOF_PTHREAD_BARRIER_T 20
+# define __SIZEOF_PTHREAD_BARRIERATTR_T 4
+#endif
+
+
+/* Thread identifiers. The structure of the attribute type is not
+ exposed on purpose. */
+typedef unsigned long int pthread_t;
+
+
+typedef union
+{
+ char __size[__SIZEOF_PTHREAD_ATTR_T];
+ long int __align;
+} pthread_attr_t;
+
+
+#if __WORDSIZE == 64
+typedef struct __pthread_internal_list
+{
+ struct __pthread_internal_list *__prev;
+ struct __pthread_internal_list *__next;
+} __pthread_list_t;
+#else
+typedef struct __pthread_internal_slist
+{
+ struct __pthread_internal_slist *__next;
+} __pthread_slist_t;
+#endif
+
+
+/* Data structures for mutex handling. The structure of the attribute
+ type is not exposed on purpose. */
+typedef union
+{
+ struct __pthread_mutex_s
+ {
+ int __lock;
+ unsigned int __count;
+ int __owner;
+#if __WORDSIZE == 64
+ unsigned int __nusers;
+#endif
+ /* KIND must stay at this position in the structure to maintain
+ binary compatibility. */
+ int __kind;
+#if __WORDSIZE == 64
+ int __spins;
+ __pthread_list_t __list;
+# define __PTHREAD_MUTEX_HAVE_PREV 1
+#else
+ unsigned int __nusers;
+ __extension__ union
+ {
+ int __spins;
+ __pthread_slist_t __list;
+ };
+#endif
+ } __data;
+ char __size[__SIZEOF_PTHREAD_MUTEX_T];
+ long int __align;
+} pthread_mutex_t;
+
+typedef union
+{
+ char __size[__SIZEOF_PTHREAD_MUTEXATTR_T];
+ int __align;
+} pthread_mutexattr_t;
+
+
+/* Data structure for conditional variable handling. The structure of
+ the attribute type is not exposed on purpose. */
+typedef union
+{
+ struct
+ {
+ int __lock;
+ unsigned int __futex;
+ __extension__ unsigned long long int __total_seq;
+ __extension__ unsigned long long int __wakeup_seq;
+ __extension__ unsigned long long int __woken_seq;
+ void *__mutex;
+ unsigned int __nwaiters;
+ unsigned int __broadcast_seq;
+ } __data;
+ char __size[__SIZEOF_PTHREAD_COND_T];
+ __extension__ long long int __align;
+} pthread_cond_t;
+
+typedef union
+{
+ char __size[__SIZEOF_PTHREAD_CONDATTR_T];
+ int __align;
+} pthread_condattr_t;
+
+
+/* Keys for thread-specific data */
+typedef unsigned int pthread_key_t;
+
+
+/* Once-only execution */
+typedef int pthread_once_t;
+
+
+#if defined __USE_UNIX98 || defined __USE_XOPEN2K
+/* Data structure for read-write lock variable handling. The
+ structure of the attribute type is not exposed on purpose. */
+typedef union
+{
+# if __WORDSIZE == 64
+ struct
+ {
+ int __lock;
+ unsigned int __nr_readers;
+ unsigned int __readers_wakeup;
+ unsigned int __writer_wakeup;
+ unsigned int __nr_readers_queued;
+ unsigned int __nr_writers_queued;
+ int __writer;
+ int __shared;
+ unsigned long int __pad1;
+ unsigned long int __pad2;
+ /* FLAGS must stay at this position in the structure to maintain
+ binary compatibility. */
+ unsigned int __flags;
+ } __data;
+# else
+ struct
+ {
+ int __lock;
+ unsigned int __nr_readers;
+ unsigned int __readers_wakeup;
+ unsigned int __writer_wakeup;
+ unsigned int __nr_readers_queued;
+ unsigned int __nr_writers_queued;
+ /* FLAGS must stay at this position in the structure to maintain
+ binary compatibility. */
+ unsigned char __flags;
+ unsigned char __shared;
+ unsigned char __pad1;
+ unsigned char __pad2;
+ int __writer;
+ } __data;
+# endif
+ char __size[__SIZEOF_PTHREAD_RWLOCK_T];
+ long int __align;
+} pthread_rwlock_t;
+
+typedef union
+{
+ char __size[__SIZEOF_PTHREAD_RWLOCKATTR_T];
+ long int __align;
+} pthread_rwlockattr_t;
+#endif
+
+
+#ifdef __USE_XOPEN2K
+/* POSIX spinlock data type. */
+typedef volatile int pthread_spinlock_t;
+
+
+/* POSIX barriers data type. The structure of the type is
+ deliberately not exposed. */
+typedef union
+{
+ char __size[__SIZEOF_PTHREAD_BARRIER_T];
+ long int __align;
+} pthread_barrier_t;
+
+typedef union
+{
+ char __size[__SIZEOF_PTHREAD_BARRIERATTR_T];
+ int __align;
+} pthread_barrierattr_t;
+#endif
+
+
+#if __WORDSIZE == 32
+/* Extra attributes for the cleanup functions. */
+# define __cleanup_fct_attribute __attribute__ ((__regparm__ (1)))
+#endif
+
+#endif /* bits/pthreadtypes.h */
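
The union-plus-__size layout above exists to pin the ABI: applications see an opaque, fixed-size object, NPTL itself uses the __data view, and the comment on __kind marks a field whose offset must never move. A hypothetical compile-time check of those invariants (not part of the commit; assumes a C11 compiler building against these headers):

    /* Hypothetical sanity checks, not part of the commit.  Build with
       any C11 compiler against the uClibc headers added here.  */
    #include <stddef.h>
    #include <pthread.h>

    _Static_assert(sizeof(pthread_mutex_t) == __SIZEOF_PTHREAD_MUTEX_T,
                   "__size padding must pin the mutex ABI size");
    _Static_assert(sizeof(pthread_cond_t) == __SIZEOF_PTHREAD_COND_T,
                   "__size padding must pin the condvar ABI size");
    /* __kind follows __lock, __count, __owner (and __nusers on 64-bit),
       so its offset is 16 on 64-bit and 12 on 32-bit and must not move.  */
    _Static_assert(offsetof(pthread_mutex_t, __data.__kind) ==
                   (__WORDSIZE == 64 ? 16 : 12),
                   "__kind offset is part of the binary interface");
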
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/semaphore.h b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/semaphore.h
new file mode 100644
index 000000000..57edbbbfb
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/semaphore.h
@@ -0,0 +1,44 @@
+/* Copyright (C) 2002, 2004 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#ifndef _SEMAPHORE_H
+# error "Never use <bits/semaphore.h> directly; include <semaphore.h> instead."
+#endif
+
+#include <bits/wordsize.h>
+
+#if __WORDSIZE == 64
+# define __SIZEOF_SEM_T 32
+#else
+# define __SIZEOF_SEM_T 16
+#endif
+
+
+/* Value returned if `sem_open' failed. */
+#define SEM_FAILED ((sem_t *) 0)
+
+/* Maximum value the semaphore can have. */
+#define SEM_VALUE_MAX (2147483647)
+
+
+typedef union
+{
+ char __size[__SIZEOF_SEM_T];
+ long int __align;
+} sem_t;
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/cancellation.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/cancellation.S
new file mode 100644
index 000000000..680696200
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/cancellation.S
@@ -0,0 +1,116 @@
+/* Copyright (C) 2009 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2009.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <tcb-offsets.h>
+#include <kernel-features.h>
+#include "lowlevellock.h"
+
+#ifdef IS_IN_libpthread
+# ifdef SHARED
+# define __pthread_unwind __GI___pthread_unwind
+# endif
+#else
+# ifndef SHARED
+ .weak __pthread_unwind
+# endif
+#endif
+
+
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl %fs:PRIVATE_FUTEX, reg
+# else
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT, reg
+# endif
+#endif
+
+/* It is crucial that the functions in this file don't modify registers
+ other than %rax and %r11. The syscall wrapper code depends on this
+ because it doesn't explicitly save the other registers which hold
+ relevant values. */
+ .text
+
+ .hidden __pthread_enable_asynccancel
+ENTRY(__pthread_enable_asynccancel)
+ movl %fs:CANCELHANDLING, %eax
+2: movl %eax, %r11d
+ orl $TCB_CANCELTYPE_BITMASK, %r11d
+ cmpl %eax, %r11d
+ je 1f
+
+ lock
+ cmpxchgl %r11d, %fs:CANCELHANDLING
+ jnz 2b
+
+ andl $(TCB_CANCELSTATE_BITMASK|TCB_CANCELTYPE_BITMASK|TCB_CANCELED_BITMASK|TCB_EXITING_BITMASK|TCB_CANCEL_RESTMASK|TCB_TERMINATED_BITMASK), %r11d
+ cmpl $(TCB_CANCELTYPE_BITMASK|TCB_CANCELED_BITMASK), %r11d
+ je 3f
+
+1: ret
+
+3: movq $TCB_PTHREAD_CANCELED, %fs:RESULT
+ lock
+ orl $TCB_EXITING_BITMASK, %fs:CANCELHANDLING
+ movq %fs:CLEANUP_JMP_BUF, %rdi
+#ifdef SHARED
+ call __pthread_unwind@PLT
+#else
+ call __pthread_unwind
+#endif
+ hlt
+END(__pthread_enable_asynccancel)
+
+
+ .hidden __pthread_disable_asynccancel
+ENTRY(__pthread_disable_asynccancel)
+ testl $TCB_CANCELTYPE_BITMASK, %edi
+ jnz 1f
+
+ movl %fs:CANCELHANDLING, %eax
+2: movl %eax, %r11d
+ andl $~TCB_CANCELTYPE_BITMASK, %r11d
+ lock
+ cmpxchgl %r11d, %fs:CANCELHANDLING
+ jnz 2b
+
+ movl %r11d, %eax
+3: andl $(TCB_CANCELING_BITMASK|TCB_CANCELED_BITMASK), %eax
+ cmpl $TCB_CANCELING_BITMASK, %eax
+ je 4f
+1: ret
+
+	/* Performance doesn't matter in this loop.  We will
+	   delay until the thread is canceled, and we are unlikely
+	   to enter the loop twice.  */
+4: movq %fs:0, %rdi
+ movl $__NR_futex, %eax
+ xorq %r10, %r10
+ addq $CANCELHANDLING, %rdi
+ LOAD_PRIVATE_FUTEX_WAIT (%esi)
+ syscall
+ movl %fs:CANCELHANDLING, %eax
+ jmp 3b
+END(__pthread_disable_asynccancel)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/clone.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/clone.S
new file mode 100644
index 000000000..675a997e9
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/clone.S
@@ -0,0 +1,9 @@
+/* We want an #include_next, but we are the main source file.
+ So, #include ourselves and in that incarnation we can use #include_next. */
+#ifndef INCLUDED_SELF
+# define INCLUDED_SELF
+# include <clone.S>
+#else
+# define RESET_PID
+# include_next <clone.S>
+#endif
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/compat-timer.h b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/compat-timer.h
new file mode 100644
index 000000000..02485daa5
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/compat-timer.h
@@ -0,0 +1,46 @@
+/* Copyright (C) 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If not,
+ write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+#include <signal.h>
+#include <time.h>
+#include <sys/types.h>
+
+#define OLD_TIMER_MAX 256
+
+extern timer_t __compat_timer_list[OLD_TIMER_MAX] attribute_hidden;
+
+
+extern int __timer_create_new (clockid_t clock_id, struct sigevent *evp,
+ timer_t *timerid);
+extern int __timer_delete_new (timer_t timerid);
+extern int __timer_getoverrun_new (timer_t timerid);
+extern int __timer_gettime_new (timer_t timerid, struct itimerspec *value);
+extern int __timer_settime_new (timer_t timerid, int flags,
+ const struct itimerspec *value,
+ struct itimerspec *ovalue);
+
+
+extern int __timer_create_old (clockid_t clock_id, struct sigevent *evp,
+ int *timerid);
+extern int __timer_delete_old (int timerid);
+extern int __timer_getoverrun_old (int timerid);
+extern int __timer_gettime_old (int timerid, struct itimerspec *value);
+extern int __timer_settime_old (int timerid, int flags,
+ const struct itimerspec *value,
+ struct itimerspec *ovalue);
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/fork.c b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/fork.c
new file mode 100644
index 000000000..c828e158d
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/fork.c
@@ -0,0 +1,31 @@
+/* Copyright (C) 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sched.h>
+#include <signal.h>
+#include <sysdep.h>
+#include <tls.h>
+
+
+#define ARCH_FORK() \
+ INLINE_SYSCALL (clone, 4, \
+ CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID | SIGCHLD, 0, \
+ NULL, &THREAD_SELF->tid)
+
+#include "../fork.c"
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S
new file mode 100644
index 000000000..110058850
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S
@@ -0,0 +1,22 @@
+/* Copyright (C) 2009 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2009.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#define __pthread_enable_asynccancel __libc_enable_asynccancel
+#define __pthread_disable_asynccancel __libc_disable_asynccancel
+#include "cancellation.S"
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S
new file mode 100644
index 000000000..ce8ad27aa
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S
@@ -0,0 +1,20 @@
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include "lowlevellock.S"
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S
new file mode 100644
index 000000000..ce4192b5d
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S
@@ -0,0 +1,22 @@
+/* Copyright (C) 2009 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2009.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#define __pthread_enable_asynccancel __librt_enable_asynccancel
+#define __pthread_disable_asynccancel __librt_disable_asynccancel
+#include "cancellation.S"
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
new file mode 100644
index 000000000..1e461ad41
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
@@ -0,0 +1,282 @@
+/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <pthread-errnos.h>
+
+ .text
+
+#ifndef LOCK
+# ifdef UP
+# define LOCK
+# else
+# define LOCK lock
+# endif
+#endif
+
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+
+/* For the calculation see asm/vsyscall.h. */
+#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
+
+
+ .globl __lll_mutex_lock_wait
+ .type __lll_mutex_lock_wait,@function
+ .hidden __lll_mutex_lock_wait
+ .align 16
+__lll_mutex_lock_wait:
+ pushq %r10
+ pushq %rdx
+
+ xorq %r10, %r10 /* No timeout. */
+ movl $2, %edx
+#if FUTEX_WAIT == 0
+ xorl %esi, %esi
+#else
+ movl $FUTEX_WAIT, %esi
+#endif
+
+ cmpl %edx, %eax /* NB: %edx == 2 */
+ jne 2f
+
+1: movl $SYS_futex, %eax
+ syscall
+
+2: movl %edx, %eax
+ xchgl %eax, (%rdi) /* NB: lock is implied */
+
+ testl %eax, %eax
+ jnz 1b
+
+ popq %rdx
+ popq %r10
+ retq
+ .size __lll_mutex_lock_wait,.-__lll_mutex_lock_wait
+
+
+#ifdef NOT_IN_libc
+ .globl __lll_mutex_timedlock_wait
+ .type __lll_mutex_timedlock_wait,@function
+ .hidden __lll_mutex_timedlock_wait
+ .align 16
+__lll_mutex_timedlock_wait:
+ /* Check for a valid timeout value. */
+ cmpq $1000000000, 8(%rdx)
+ jae 3f
+
+ pushq %r8
+ pushq %r9
+ pushq %r12
+ pushq %r13
+ pushq %r14
+
+ /* Stack frame for the timespec and timeval structs. */
+ subq $16, %rsp
+
+ movq %rdi, %r12
+ movq %rdx, %r13
+
+1:
+ /* Get current time. */
+ movq %rsp, %rdi
+ xorl %esi, %esi
+ movq $VSYSCALL_ADDR_vgettimeofday, %rax
+ /* This is a regular function call, all caller-save registers
+ might be clobbered. */
+ callq *%rax
+
+ /* Compute relative timeout. */
+ movq 8(%rsp), %rax
+ movl $1000, %edi
+	mul	%rdi		/* Microseconds to nanoseconds.  */
+ movq (%r13), %rdi
+ movq 8(%r13), %rsi
+ subq (%rsp), %rdi
+ subq %rax, %rsi
+ jns 4f
+ addq $1000000000, %rsi
+ decq %rdi
+4: testq %rdi, %rdi
+ js 5f /* Time is already up. */
+
+ /* Futex call. */
+ movq %rdi, (%rsp) /* Store relative timeout. */
+ movq %rsi, 8(%rsp)
+
+ movl $1, %eax
+ movl $2, %edx
+ LOCK
+ cmpxchgl %edx, (%r12)
+
+ testl %eax, %eax
+ je 8f
+
+ movq %rsp, %r10
+#if FUTEX_WAIT == 0
+ xorl %esi, %esi
+#else
+ movl $FUTEX_WAIT, %esi
+#endif
+ movq %r12, %rdi
+ movl $SYS_futex, %eax
+ syscall
+ movq %rax, %rcx
+
+8: /* NB: %edx == 2 */
+ xorl %eax, %eax
+ LOCK
+ cmpxchgl %edx, (%rdi)
+ jnz 7f
+
+6: addq $16, %rsp
+ popq %r14
+ popq %r13
+ popq %r12
+ popq %r9
+ popq %r8
+ retq
+
+ /* Check whether the time expired. */
+7: cmpq $-ETIMEDOUT, %rcx
+ je 5f
+
+ /* Make sure the current holder knows we are going to sleep. */
+ movl %edx, %eax
+ xchgl %eax, (%rdi)
+ testl %eax, %eax
+ jz 6b
+ jmp 1b
+
+3: movl $EINVAL, %eax
+ retq
+
+5: movl $ETIMEDOUT, %eax
+ jmp 6b
+ .size __lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait
+#endif
+
+
+#ifdef NOT_IN_libc
+ .globl lll_unlock_wake_cb
+ .type lll_unlock_wake_cb,@function
+ .hidden lll_unlock_wake_cb
+ .align 16
+lll_unlock_wake_cb:
+ pushq %rsi
+ pushq %rdx
+
+ LOCK
+ addl $1, (%rdi)
+ jng 1f
+
+ popq %rdx
+ popq %rsi
+ retq
+ .size lll_unlock_wake_cb,.-lll_unlock_wake_cb
+#endif
+
+
+ .globl __lll_mutex_unlock_wake
+ .type __lll_mutex_unlock_wake,@function
+ .hidden __lll_mutex_unlock_wake
+ .align 16
+__lll_mutex_unlock_wake:
+ pushq %rsi
+ pushq %rdx
+
+ movl $0, (%rdi)
+ movl $FUTEX_WAKE, %esi
+ movl $1, %edx /* Wake one thread. */
+ movl $SYS_futex, %eax
+ syscall
+
+ popq %rdx
+ popq %rsi
+ retq
+ .size __lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake
+
+
+#ifdef NOT_IN_libc
+ .globl __lll_timedwait_tid
+ .type __lll_timedwait_tid,@function
+ .hidden __lll_timedwait_tid
+ .align 16
+__lll_timedwait_tid:
+ pushq %r12
+ pushq %r13
+
+ movq %rdi, %r12
+ movq %rsi, %r13
+
+ subq $16, %rsp
+
+ /* Get current time. */
+2: movq %rsp, %rdi
+ xorl %esi, %esi
+ movq $VSYSCALL_ADDR_vgettimeofday, %rax
+ callq *%rax
+
+ /* Compute relative timeout. */
+ movq 8(%rsp), %rax
+ movl $1000, %edi
+	mul	%rdi		/* Microseconds to nanoseconds.  */
+ movq (%r13), %rdi
+ movq 8(%r13), %rsi
+ subq (%rsp), %rdi
+ subq %rax, %rsi
+ jns 5f
+ addq $1000000000, %rsi
+ decq %rdi
+5: testq %rdi, %rdi
+ js 6f /* Time is already up. */
+
+ movq %rdi, (%rsp) /* Store relative timeout. */
+ movq %rsi, 8(%rsp)
+
+ movl (%r12), %edx
+ testl %edx, %edx
+ jz 4f
+
+ movq %rsp, %r10
+#if FUTEX_WAIT == 0
+ xorl %esi, %esi
+#else
+ movl $FUTEX_WAIT, %esi
+#endif
+ movq %r12, %rdi
+ movl $SYS_futex, %eax
+ syscall
+
+ cmpl $0, (%rdi)
+ jne 1f
+4: xorl %eax, %eax
+
+8: addq $16, %rsp
+ popq %r13
+ popq %r12
+ retq
+
+1: cmpq $-ETIMEDOUT, %rax
+ jne 2b
+
+6: movl $ETIMEDOUT, %eax
+ jmp 8b
+ .size __lll_timedwait_tid,.-__lll_timedwait_tid
+#endif
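
__lll_mutex_lock_wait is the contended path of the classic three-state futex lock (0 free, 1 locked, 2 locked with waiters): it stamps the word with 2 via xchg, calls FUTEX_WAIT while the value it displaced was nonzero, and retries after every wakeup; __lll_mutex_unlock_wake stores 0 and wakes a single sleeper. A C rendition of the same algorithm, as a sketch using GCC atomics and the raw futex syscall (the real fast paths are the inline-asm macros in lowlevellock.h):

    /* C sketch of the 0/1/2 futex lock from lowlevellock.S; the fast
       paths in lowlevellock.h inline the uncontended cases.  */
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static void lock_wait(int *futex)
    {
        /* Mark the lock contended and sleep until it is handed to us. */
        while (__atomic_exchange_n(futex, 2, __ATOMIC_ACQUIRE) != 0)
            syscall(SYS_futex, futex, FUTEX_WAIT, 2, NULL, NULL, 0);
    }

    static void lock(int *futex)
    {
        int expected = 0;
        if (!__atomic_compare_exchange_n(futex, &expected, 1, 0,
                                         __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
            lock_wait(futex);           /* slow path: 0 -> 1 failed */
    }

    static void unlock(int *futex)
    {
        /* The asm uses "lock decl"; storing 0 and waking one waiter is
           the same idea (waiters always re-stamp the word with 2).  */
        if (__atomic_exchange_n(futex, 0, __ATOMIC_RELEASE) != 1)
            syscall(SYS_futex, futex, FUTEX_WAKE, 1, NULL, NULL, 0);
    }
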
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
new file mode 100644
index 000000000..c9f30e962
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
@@ -0,0 +1,342 @@
+/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#ifndef _LOWLEVELLOCK_H
+#define _LOWLEVELLOCK_H 1
+
+#include <time.h>
+#include <sys/param.h>
+#include <bits/pthreadtypes.h>
+#include <atomic.h>
+#include <sysdep.h>
+
+#ifndef LOCK_INSTR
+# ifdef UP
+# define LOCK_INSTR /* nothing */
+# else
+# define LOCK_INSTR "lock;"
+# endif
+#endif
+
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+
+
+/* Initializer for compatibility lock. */
+#define LLL_MUTEX_LOCK_INITIALIZER (0)
+#define LLL_MUTEX_LOCK_INITIALIZER_LOCKED (1)
+#define LLL_MUTEX_LOCK_INITIALIZER_WAITERS (2)
+
+/* Delay in spinlock loop. */
+#define BUSY_WAIT_NOP __asm__ ("rep; nop")
+
+
+#define lll_futex_wait(futex, val) \
+ do { \
+ int __ignore; \
+ register __typeof (val) _val __asm__ ("edx") = (val); \
+ __asm__ __volatile ("xorq %%r10, %%r10\n\t" \
+ "syscall" \
+ : "=a" (__ignore) \
+ : "0" (SYS_futex), "D" (futex), "S" (FUTEX_WAIT), \
+ "d" (_val) \
+ : "memory", "cc", "r10", "r11", "cx"); \
+ } while (0)
+
+
+#define lll_futex_wake(futex, nr) \
+ do { \
+ int __ignore; \
+ register __typeof (nr) _nr __asm__ ("edx") = (nr); \
+ __asm__ __volatile ("syscall" \
+ : "=a" (__ignore) \
+ : "0" (SYS_futex), "D" (futex), "S" (FUTEX_WAKE), \
+ "d" (_nr) \
+ : "memory", "cc", "r10", "r11", "cx"); \
+ } while (0)
+
+
+/* Does not preserve %eax and %ecx. */
+extern int __lll_mutex_lock_wait (int *__futex, int __val) attribute_hidden;
+/* Does not preserve %eax, %ecx, and %edx.  */
+extern int __lll_mutex_timedlock_wait (int *__futex, int __val,
+ const struct timespec *__abstime)
+ attribute_hidden;
+/* Preserves all registers but %eax. */
+extern int __lll_mutex_unlock_wait (int *__futex) attribute_hidden;
+
+
+/* NB: in the lll_mutex_trylock macro we simply return the value in %eax
+   after the cmpxchg instruction.  In case the operation succeeded this
+   value is zero.  In case the operation failed, the cmpxchg instruction
+   has loaded the current value of the memory word, which is guaranteed
+   to be nonzero.  */
+#define lll_mutex_trylock(futex) \
+ ({ int ret; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
+ : "=a" (ret), "=m" (futex) \
+ : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
+ "0" (LLL_MUTEX_LOCK_INITIALIZER) \
+ : "memory"); \
+ ret; })
+
+
+#define lll_mutex_cond_trylock(futex) \
+ ({ int ret; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
+ : "=a" (ret), "=m" (futex) \
+ : "r" (LLL_MUTEX_LOCK_INITIALIZER_WAITERS), \
+ "m" (futex), "0" (LLL_MUTEX_LOCK_INITIALIZER) \
+ : "memory"); \
+ ret; })
+
+
+#define lll_mutex_lock(futex) \
+ (void) ({ int ignore1, ignore2, ignore3; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \
+ "jnz 1f\n\t" \
+ ".subsection 1\n" \
+ "1:\tleaq %2, %%rdi\n\t" \
+ "subq $128, %%rsp\n\t" \
+ "callq __lll_mutex_lock_wait\n\t" \
+ "addq $128, %%rsp\n\t" \
+ "jmp 2f\n\t" \
+ ".previous\n" \
+ "2:" \
+ : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\
+ "=a" (ignore3) \
+ : "0" (1), "m" (futex), "3" (0) \
+ : "cx", "r11", "cc", "memory"); })
+
+
+#define lll_mutex_cond_lock(futex) \
+ (void) ({ int ignore1, ignore2, ignore3; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \
+ "jnz 1f\n\t" \
+ ".subsection 1\n" \
+ "1:\tleaq %2, %%rdi\n\t" \
+ "subq $128, %%rsp\n\t" \
+ "callq __lll_mutex_lock_wait\n\t" \
+ "addq $128, %%rsp\n\t" \
+ "jmp 2f\n\t" \
+ ".previous\n" \
+ "2:" \
+ : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\
+ "=a" (ignore3) \
+ : "0" (2), "m" (futex), "3" (0) \
+ : "cx", "r11", "cc", "memory"); })
+
+
+#define lll_mutex_timedlock(futex, timeout) \
+ ({ int _result, ignore1, ignore2, ignore3; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %4\n\t" \
+ "jnz 1f\n\t" \
+ ".subsection 1\n" \
+ "1:\tleaq %4, %%rdi\n\t" \
+ "movq %8, %%rdx\n\t" \
+ "subq $128, %%rsp\n\t" \
+ "callq __lll_mutex_timedlock_wait\n\t" \
+ "addq $128, %%rsp\n\t" \
+ "jmp 2f\n\t" \
+ ".previous\n" \
+ "2:" \
+ : "=a" (_result), "=&D" (ignore1), "=S" (ignore2), \
+ "=&d" (ignore3), "=m" (futex) \
+ : "0" (0), "2" (1), "m" (futex), "m" (timeout) \
+ : "memory", "cx", "cc", "r10", "r11"); \
+ _result; })
+
+
+#define lll_mutex_unlock(futex) \
+ (void) ({ int ignore; \
+ __asm__ __volatile (LOCK_INSTR "decl %0\n\t" \
+ "jne 1f\n\t" \
+ ".subsection 1\n" \
+ "1:\tleaq %0, %%rdi\n\t" \
+ "subq $128, %%rsp\n\t" \
+ "callq __lll_mutex_unlock_wake\n\t" \
+ "addq $128, %%rsp\n\t" \
+ "jmp 2f\n\t" \
+ ".previous\n" \
+ "2:" \
+ : "=m" (futex), "=&D" (ignore) \
+ : "m" (futex) \
+ : "ax", "cx", "r11", "cc", "memory"); })
+
+
+#define lll_mutex_islocked(futex) \
+ (futex != LLL_MUTEX_LOCK_INITIALIZER)
+
+
+/* We have a separate internal lock implementation which is not tied
+ to binary compatibility. */
+
+/* Type for lock object. */
+typedef int lll_lock_t;
+
+/* Initializers for lock. */
+#define LLL_LOCK_INITIALIZER (0)
+#define LLL_LOCK_INITIALIZER_LOCKED (1)
+
+
+extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
+
+
+/* The states of a lock are:
+ 0 - untaken
+ 1 - taken by one user
+ 2 - taken by more users */
+
+
+#if defined NOT_IN_libc || defined UP
+# define lll_trylock(futex) lll_mutex_trylock (futex)
+# define lll_lock(futex) lll_mutex_lock (futex)
+# define lll_unlock(futex) lll_mutex_unlock (futex)
+#else
+/* Special versions of the macros for use in libc itself. They avoid
+ the lock prefix when the thread library is not used.
+
+ The code sequence to avoid unnecessary lock prefixes is what the AMD
+ guys suggested. If you do not like it, bring it up with AMD.
+
+ XXX In future we might even want to avoid it on UP machines. */
+
+# define lll_trylock(futex) \
+ ({ unsigned char ret; \
+ __asm__ __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
+ "je 0f\n\t" \
+ "lock; cmpxchgl %2, %1\n\t" \
+ "jmp 1f\n" \
+ "0:\tcmpxchgl %2, %1\n\t" \
+ "1:setne %0" \
+ : "=a" (ret), "=m" (futex) \
+ : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
+ "0" (LLL_MUTEX_LOCK_INITIALIZER) \
+ : "memory"); \
+ ret; })
+
+
+# define lll_lock(futex) \
+ (void) ({ int ignore1, ignore2, ignore3; \
+ __asm__ __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
+ "je 0f\n\t" \
+ "lock; cmpxchgl %0, %2\n\t" \
+ "jnz 1f\n\t" \
+ "jmp 2f\n" \
+ "0:\tcmpxchgl %0, %2\n\t" \
+ "jnz 1f\n\t" \
+ ".subsection 1\n" \
+ "1:\tleaq %2, %%rdi\n\t" \
+ "subq $128, %%rsp\n\t" \
+ "callq __lll_mutex_lock_wait\n\t" \
+ "addq $128, %%rsp\n\t" \
+ "jmp 2f\n\t" \
+ ".previous\n" \
+ "2:" \
+ : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\
+ "=a" (ignore3) \
+ : "0" (1), "m" (futex), "3" (0) \
+ : "cx", "r11", "cc", "memory"); })
+
+
+# define lll_unlock(futex) \
+ (void) ({ int ignore; \
+ __asm__ __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
+ "je 0f\n\t" \
+ "lock; decl %0\n\t" \
+ "jne 1f\n\t" \
+ "jmp 2f\n" \
+ "0:\tdecl %0\n\t" \
+ "jne 1f\n\t" \
+ ".subsection 1\n" \
+ "1:\tleaq %0, %%rdi\n\t" \
+ "subq $128, %%rsp\n\t" \
+ "callq __lll_mutex_unlock_wake\n\t" \
+ "addq $128, %%rsp\n\t" \
+ "jmp 2f\n\t" \
+ ".previous\n" \
+ "2:" \
+ : "=m" (futex), "=&D" (ignore) \
+ : "m" (futex) \
+ : "ax", "cx", "r11", "cc", "memory"); })
+#endif
+
+
+#define lll_islocked(futex) \
+ (futex != LLL_MUTEX_LOCK_INITIALIZER)
+
+
+/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
+ wakeup when the clone terminates. The memory location contains the
+ thread ID while the clone is running and is reset to zero
+ afterwards.
+
+ The macro parameter must not have any side effect. */
+#define lll_wait_tid(tid) \
+ do { \
+ int __ignore; \
+ register __typeof (tid) _tid __asm__ ("edx") = (tid); \
+ if (_tid != 0) \
+ __asm__ __volatile ("xorq %%r10, %%r10\n\t" \
+ "1:\tmovq %2, %%rax\n\t" \
+ "syscall\n\t" \
+ "cmpl $0, (%%rdi)\n\t" \
+ "jne 1b" \
+ : "=&a" (__ignore) \
+ : "S" (FUTEX_WAIT), "i" (SYS_futex), "D" (&tid), \
+ "d" (_tid) \
+ : "memory", "cc", "r10", "r11", "cx"); \
+ } while (0)
+
+extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
+ attribute_hidden;
+#define lll_timedwait_tid(tid, abstime) \
+ ({ \
+ int __result = 0; \
+ if (tid != 0) \
+ { \
+ if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) \
+ __result = EINVAL; \
+ else \
+ __result = __lll_timedwait_tid (&tid, abstime); \
+ } \
+ __result; })
+
+
+/* Conditional variable handling. */
+
+extern void __lll_cond_wait (pthread_cond_t *cond) attribute_hidden;
+extern int __lll_cond_timedwait (pthread_cond_t *cond,
+ const struct timespec *abstime)
+ attribute_hidden;
+extern void __lll_cond_wake (pthread_cond_t *cond) attribute_hidden;
+extern void __lll_cond_broadcast (pthread_cond_t *cond) attribute_hidden;
+
+
+#define lll_cond_wait(cond) \
+ __lll_cond_wait (cond)
+#define lll_cond_timedwait(cond, abstime) \
+ __lll_cond_timedwait (cond, abstime)
+#define lll_cond_wake(cond) \
+ __lll_cond_wake (cond)
+#define lll_cond_broadcast(cond) \
+ __lll_cond_broadcast (cond)
+
+
+#endif /* lowlevellock.h */
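
lll_wait_tid is the join primitive: a thread created with CLONE_CHILD_CLEARTID has its tid field zeroed, and a futex wake issued on it, by the kernel when it exits, so joining reduces to FUTEX_WAIT on that field while it still holds the thread ID. A stand-alone C sketch of the same loop (illustrative; the macro above inlines the syscall directly):

    /* Stand-alone sketch of lll_wait_tid: block until the kernel clears
       the CLONE_CHILD_CLEARTID location and wakes us.  */
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static void wait_tid(pid_t *tidp)
    {
        pid_t tid;
        /* Spurious wakeups and EAGAIN (value already changed) both lead
           back to the re-check, like the "jne 1b" in the macro above.  */
        while ((tid = __atomic_load_n(tidp, __ATOMIC_ACQUIRE)) != 0)
            syscall(SYS_futex, tidp, FUTEX_WAIT, tid, NULL, NULL, 0);
    }
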
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S
new file mode 100644
index 000000000..4b434632e
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S
@@ -0,0 +1,306 @@
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2009
+ Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <pthread-errnos.h>
+#include <lowlevellock.h>
+#include <lowlevelrobustlock.h>
+#include <kernel-features.h>
+
+ .text
+
+#define FUTEX_WAITERS 0x80000000
+#define FUTEX_OWNER_DIED 0x40000000
+
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+ xorl $(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME), reg
+#else
+# if FUTEX_WAIT == 0
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg
+# else
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT, reg
+# endif
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, reg
+#endif
+
+/* For the calculation see asm/vsyscall.h. */
+#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
+
+
+ .globl __lll_robust_lock_wait
+ .type __lll_robust_lock_wait,@function
+ .hidden __lll_robust_lock_wait
+ .align 16
+__lll_robust_lock_wait:
+ cfi_startproc
+ pushq %r10
+ cfi_adjust_cfa_offset(8)
+ pushq %rdx
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%r10, -16)
+ cfi_offset(%rdx, -24)
+
+ xorq %r10, %r10 /* No timeout. */
+ LOAD_FUTEX_WAIT (%esi)
+
+4: movl %eax, %edx
+ orl $FUTEX_WAITERS, %edx
+
+ testl $FUTEX_OWNER_DIED, %eax
+ jnz 3f
+
+ cmpl %edx, %eax
+ je 1f
+
+ LOCK
+ cmpxchgl %edx, (%rdi)
+ jnz 2f
+
+1: movl $SYS_futex, %eax
+ syscall
+
+ movl (%rdi), %eax
+
+2: testl %eax, %eax
+ jne 4b
+
+ movl %fs:TID, %edx
+ orl $FUTEX_WAITERS, %edx
+ LOCK
+ cmpxchgl %edx, (%rdi)
+ jnz 4b
+ /* NB: %rax == 0 */
+
+3: popq %rdx
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%rdx)
+ popq %r10
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r10)
+ retq
+ cfi_endproc
+ .size __lll_robust_lock_wait,.-__lll_robust_lock_wait
+
+
+ .globl __lll_robust_timedlock_wait
+ .type __lll_robust_timedlock_wait,@function
+ .hidden __lll_robust_timedlock_wait
+ .align 16
+__lll_robust_timedlock_wait:
+ cfi_startproc
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+# ifdef __PIC__
+ cmpl $0, __have_futex_clock_realtime(%rip)
+# else
+ cmpl $0, __have_futex_clock_realtime
+# endif
+ je .Lreltmo
+# endif
+
+ pushq %r9
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r9, 0)
+ movq %rdx, %r10
+ movl $0xffffffff, %r9d
+ LOAD_FUTEX_WAIT_ABS (%esi)
+
+1: testl $FUTEX_OWNER_DIED, %eax
+ jnz 3f
+
+ movl %eax, %edx
+ orl $FUTEX_WAITERS, %edx
+
+ cmpl %eax, %edx
+ je 5f
+
+ LOCK
+ cmpxchgl %edx, (%rdi)
+ movq $0, %rcx /* Must use mov to avoid changing cc. */
+ jnz 6f
+
+5: movl $SYS_futex, %eax
+ syscall
+ movl %eax, %ecx
+
+ movl (%rdi), %eax
+
+6: testl %eax, %eax
+ jne 2f
+
+ movl %fs:TID, %edx
+ orl $FUTEX_WAITERS, %edx
+ LOCK
+ cmpxchgl %edx, (%rdi)
+ jnz 2f
+
+3: popq %r9
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r9)
+ retq
+
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r9, 0)
+ /* Check whether the time expired. */
+2: cmpl $-ETIMEDOUT, %ecx
+ je 4f
+ cmpl $-EINVAL, %ecx
+ jne 1b
+
+4: movl %ecx, %eax
+ negl %eax
+ jmp 3b
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r9)
+
+
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+.Lreltmo:
+ /* Check for a valid timeout value. */
+ cmpq $1000000000, 8(%rdx)
+ jae 3f
+
+ pushq %r8
+ cfi_adjust_cfa_offset(8)
+ pushq %r9
+ cfi_adjust_cfa_offset(8)
+ pushq %r12
+ cfi_adjust_cfa_offset(8)
+ pushq %r13
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%r8, -16)
+ cfi_offset(%r9, -24)
+ cfi_offset(%r12, -32)
+ cfi_offset(%r13, -40)
+ pushq %rsi
+ cfi_adjust_cfa_offset(8)
+
+ /* Stack frame for the timespec and timeval structs. */
+ subq $32, %rsp
+ cfi_adjust_cfa_offset(32)
+
+ movq %rdi, %r12
+ movq %rdx, %r13
+
+1: movq %rax, 16(%rsp)
+
+ /* Get current time. */
+ movq %rsp, %rdi
+ xorl %esi, %esi
+ movq $VSYSCALL_ADDR_vgettimeofday, %rax
+ /* This is a regular function call, all caller-save registers
+ might be clobbered. */
+ callq *%rax
+
+ /* Compute relative timeout. */
+ movq 8(%rsp), %rax
+ movl $1000, %edi
+	mul	%rdi		/* Microseconds to nanoseconds.  */
+ movq (%r13), %rdi
+ movq 8(%r13), %rsi
+ subq (%rsp), %rdi
+ subq %rax, %rsi
+ jns 4f
+ addq $1000000000, %rsi
+ decq %rdi
+4: testq %rdi, %rdi
+ js 8f /* Time is already up. */
+
+ /* Futex call. */
+ movq %rdi, (%rsp) /* Store relative timeout. */
+ movq %rsi, 8(%rsp)
+
+ movq 16(%rsp), %rdx
+ movl %edx, %eax
+ orl $FUTEX_WAITERS, %edx
+
+ testl $FUTEX_OWNER_DIED, %eax
+ jnz 6f
+
+ cmpl %eax, %edx
+ je 2f
+
+ LOCK
+ cmpxchgl %edx, (%r12)
+ movq $0, %rcx /* Must use mov to avoid changing cc. */
+ jnz 5f
+
+2: movq %rsp, %r10
+ movl 32(%rsp), %esi
+ LOAD_FUTEX_WAIT (%esi)
+ movq %r12, %rdi
+ movl $SYS_futex, %eax
+ syscall
+ movq %rax, %rcx
+
+ movl (%r12), %eax
+
+5: testl %eax, %eax
+ jne 7f
+
+ movl %fs:TID, %edx
+ orl $FUTEX_WAITERS, %edx
+ LOCK
+ cmpxchgl %edx, (%r12)
+ jnz 7f
+
+6: addq $40, %rsp
+ cfi_adjust_cfa_offset(-40)
+ popq %r13
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r13)
+ popq %r12
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r12)
+ popq %r9
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r9)
+ popq %r8
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r8)
+ retq
+
+3: movl $EINVAL, %eax
+ retq
+
+ cfi_adjust_cfa_offset(72)
+ cfi_offset(%r8, -16)
+ cfi_offset(%r9, -24)
+ cfi_offset(%r12, -32)
+ cfi_offset(%r13, -40)
+ /* Check whether the time expired. */
+7: cmpl $-ETIMEDOUT, %ecx
+ jne 1b
+
+8: movl $ETIMEDOUT, %eax
+ jmp 6b
+#endif
+ cfi_endproc
+ .size __lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait
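
In the robust-lock protocol the futex word holds the owner's TID, FUTEX_WAITERS flags contention, and FUTEX_OWNER_DIED is set by the kernel when the owner exits without unlocking. __lll_robust_lock_wait therefore keeps publishing the waiters bit, sleeps, and after each wakeup either claims the free lock with its own TID or reports the dead owner to the caller. A C sketch of that slow path (assumes the flag definitions from <linux/futex.h> and GCC atomics; the TID comes from a gettid syscall instead of the %fs:TID TCB field used by the assembly):

    /* C sketch of __lll_robust_lock_wait; val is the futex word value
       observed by the caller's failed fast path.  */
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int robust_lock_wait(int *futex, int val)
    {
        int tid = syscall(SYS_gettid);

        while (!(val & FUTEX_OWNER_DIED)) {
            int with_waiters = val | FUTEX_WAITERS;
            /* Publish the waiters bit before sleeping so the unlock
               path knows it has to wake somebody.  */
            if (val != with_waiters
                && !__atomic_compare_exchange_n(futex, &val, with_waiters, 0,
                                                __ATOMIC_ACQUIRE,
                                                __ATOMIC_RELAXED))
                continue;               /* word changed, re-evaluate */

            syscall(SYS_futex, futex, FUTEX_WAIT, with_waiters, NULL, NULL, 0);
            val = __atomic_load_n(futex, __ATOMIC_RELAXED);

            if (val == 0
                && __atomic_compare_exchange_n(futex, &val,
                                               tid | FUTEX_WAITERS, 0,
                                               __ATOMIC_ACQUIRE,
                                               __ATOMIC_RELAXED))
                return 0;               /* lock acquired */
        }
        return val;                     /* FUTEX_OWNER_DIED set: recover */
    }
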
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/not-cancel.h b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/not-cancel.h
new file mode 100644
index 000000000..acf1a617e
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/not-cancel.h
@@ -0,0 +1 @@
+#include "../i386/not-cancel.h"
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pt-vfork.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pt-vfork.S
new file mode 100644
index 000000000..c20ef73e7
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pt-vfork.S
@@ -0,0 +1,31 @@
+/* Copyright (C) 2004 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#define SAVE_PID \
+ movl %fs:PID, %esi; \
+ movl %esi, %edx; \
+ negl %edx; \
+ movl %edx, %fs:PID
+
+#define RESTORE_PID \
+ testq %rax, %rax; \
+ je 1f; \
+ movl %esi, %fs:PID; \
+1:
+
+#include <../../../../../../../libc/sysdeps/linux/x86_64/vfork.S>
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S
new file mode 100644
index 000000000..f6e15a2d7
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S
@@ -0,0 +1,159 @@
+/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevelbarrier.h>
+
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+
+#ifndef UP
+# define LOCK lock
+#else
+# define LOCK
+#endif
+
+
+ .text
+
+ .globl pthread_barrier_wait
+ .type pthread_barrier_wait,@function
+ .align 16
+pthread_barrier_wait:
+ /* Get the mutex. */
+ xorl %eax, %eax
+ movl $1, %esi
+ LOCK
+ cmpxchgl %esi, MUTEX(%rdi)
+ jnz 1f
+
+	/* One less waiter.  If this was the last one needed, wake
+ everybody. */
+2: decl LEFT(%rdi)
+ je 3f
+
+ /* There are more threads to come. */
+#if CURR_EVENT == 0
+ movl (%rdi), %edx
+#else
+ movl CURR_EVENT(%rdi), %edx
+#endif
+
+ /* Release the mutex. */
+ LOCK
+ decl MUTEX(%rdi)
+ jne 6f
+
+ /* Wait for the remaining threads. The call will return immediately
+ if the CURR_EVENT memory has meanwhile been changed. */
+7:
+#if FUTEX_WAIT == 0
+ xorl %esi, %esi
+#else
+ movl $FUTEX_WAIT, %esi
+#endif
+ xorq %r10, %r10
+8: movl $SYS_futex, %eax
+ syscall
+
+ /* Don't return on spurious wakeups. The syscall does not change
+ any register except %eax so there is no need to reload any of
+ them. */
+#if CURR_EVENT == 0
+ cmpl %edx, (%rdi)
+#else
+ cmpl %edx, CURR_EVENT(%rdi)
+#endif
+ je 8b
+
+ /* Increment LEFT. If this brings the count back to the
+ initial count unlock the object. */
+ movl $1, %edx
+ movl INIT_COUNT(%rdi), %eax
+ LOCK
+ xaddl %edx, LEFT(%rdi)
+ subl $1, %eax
+ cmpl %eax, %edx
+ jne,pt 10f
+
+ /* Release the mutex. We cannot release the lock before
+ waking the waiting threads since otherwise a new thread might
+	   arrive and get woken up, too.  */
+ LOCK
+ decl MUTEX(%rdi)
+ jne 9f
+
+10: xorl %eax, %eax /* != PTHREAD_BARRIER_SERIAL_THREAD */
+
+ retq
+
+ /* The necessary number of threads arrived. */
+3:
+#if CURR_EVENT == 0
+ incl (%rdi)
+#else
+ incl CURR_EVENT(%rdi)
+#endif
+
+ /* Wake up all waiters. The count is a signed number in the kernel
+ so 0x7fffffff is the highest value. */
+ movl $0x7fffffff, %edx
+ movl $FUTEX_WAKE, %esi
+ movl $SYS_futex, %eax
+ syscall
+
+ /* Increment LEFT. If this brings the count back to the
+ initial count unlock the object. */
+ movl $1, %edx
+ movl INIT_COUNT(%rdi), %eax
+ LOCK
+ xaddl %edx, LEFT(%rdi)
+ subl $1, %eax
+ cmpl %eax, %edx
+ jne,pt 5f
+
+ /* Release the mutex. We cannot release the lock before
+ waking the waiting threads since otherwise a new thread might
+	   arrive and get woken up, too.  */
+ LOCK
+ decl MUTEX(%rdi)
+ jne 4f
+
+5: orl $-1, %eax /* == PTHREAD_BARRIER_SERIAL_THREAD */
+
+ retq
+
+1: addq $MUTEX, %rdi
+ callq __lll_mutex_lock_wait
+ subq $MUTEX, %rdi
+ jmp 2b
+
+4: addq $MUTEX, %rdi
+ callq __lll_mutex_unlock_wake
+ jmp 5b
+
+6: addq $MUTEX, %rdi
+ callq __lll_mutex_unlock_wake
+ subq $MUTEX, %rdi
+ jmp 7b
+
+9: addq $MUTEX, %rdi
+ callq __lll_mutex_unlock_wake
+ jmp 10b
+ .size pthread_barrier_wait,.-pthread_barrier_wait
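
The barrier keeps its state behind a low-level lock: CURR_EVENT (the round counter, also the futex word), LEFT (arrivals still missing), INIT_COUNT, and the MUTEX itself. Each arrival decrements LEFT; the last one bumps CURR_EVENT and wakes everybody, the others sleep until CURR_EVENT changes; every thread then adds itself back to LEFT, and whichever thread restores it to INIT_COUNT releases the mutex that the last arrival deliberately kept held, re-arming the barrier for the next round. A C sketch of that logic (illustrative; the field names and the ll_lock/ll_unlock helpers are stand-ins for the lowlevelbarrier.h offsets and the lowlevellock.h macros):

    /* Illustrative C rendition of pthread_barrier_wait.S.  */
    #include <limits.h>
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    struct barrier {
        unsigned int curr_event;    /* CURR_EVENT: round counter / futex word */
        int          mutex;         /* MUTEX: low-level lock word             */
        unsigned int left;          /* LEFT: arrivals still missing           */
        unsigned int init_count;    /* INIT_COUNT                             */
    };

    /* Stand-ins for the lowlevellock.h lock macros (which futex-wait).  */
    static void ll_lock(int *m)
    {
        while (__atomic_exchange_n(m, 1, __ATOMIC_ACQUIRE))
            ;
    }
    static void ll_unlock(int *m)
    {
        __atomic_store_n(m, 0, __ATOMIC_RELEASE);
    }

    static int barrier_wait(struct barrier *b)
    {
        int serial = 0;

        ll_lock(&b->mutex);
        if (--b->left == 0) {
            /* Last arrival: start the next round and wake everybody.
               The mutex is intentionally NOT released here.  */
            ++b->curr_event;
            syscall(SYS_futex, &b->curr_event, FUTEX_WAKE, INT_MAX,
                    NULL, NULL, 0);
            serial = -1;            /* PTHREAD_BARRIER_SERIAL_THREAD */
        } else {
            unsigned int event = b->curr_event;
            ll_unlock(&b->mutex);
            /* Sleep until the last arrival bumps curr_event; the
               re-check filters spurious wakeups.  */
            do
                syscall(SYS_futex, &b->curr_event, FUTEX_WAIT, event,
                        NULL, NULL, 0);
            while (__atomic_load_n(&b->curr_event, __ATOMIC_ACQUIRE) == event);
        }

        /* Whoever restores LEFT to INIT_COUNT releases the mutex held
           by the last arrival, re-arming the barrier.  */
        if (__atomic_add_fetch(&b->left, 1, __ATOMIC_ACQ_REL) == b->init_count)
            ll_unlock(&b->mutex);

        return serial;
    }
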
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S
new file mode 100644
index 000000000..d8ebdfab8
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S
@@ -0,0 +1,136 @@
+/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevelcond.h>
+#include <bits/kernel-features.h>
+
+#ifdef UP
+# define LOCK
+#else
+# define LOCK lock
+#endif
+
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+#define FUTEX_REQUEUE 3
+#define FUTEX_CMP_REQUEUE 4
+
+#define EINVAL 22
+
+
+ .text
+
+ /* int pthread_cond_broadcast (pthread_cond_t *cond) */
+ .globl __pthread_cond_broadcast
+ .type __pthread_cond_broadcast, @function
+ .align 16
+__pthread_cond_broadcast:
+
+ /* Get internal lock. */
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if cond_lock == 0
+ cmpxchgl %esi, (%rdi)
+#else
+ cmpxchgl %esi, cond_lock(%rdi)
+#endif
+ jnz 1f
+
+2: addq $cond_futex, %rdi
+ movq total_seq-cond_futex(%rdi), %r9
+ cmpq wakeup_seq-cond_futex(%rdi), %r9
+ jna 4f
+
+ /* Cause all currently waiting threads to recognize they are
+ woken up. */
+ movq %r9, wakeup_seq-cond_futex(%rdi)
+ movq %r9, woken_seq-cond_futex(%rdi)
+ addq %r9, %r9
+ movl %r9d, (%rdi)
+ incl broadcast_seq-cond_futex(%rdi)
+
+ /* Get the address of the mutex used. */
+ movq dep_mutex-cond_futex(%rdi), %r8
+
+ /* Unlock. */
+ LOCK
+ decl cond_lock-cond_futex(%rdi)
+ jne 7f
+
+8: cmpq $-1, %r8
+ je 9f
+
+ /* Wake up all threads. */
+ movl $FUTEX_CMP_REQUEUE, %esi
+ movl $SYS_futex, %eax
+ movl $1, %edx
+ movl $0x7fffffff, %r10d
+ syscall
+
+	/* For any kind of error (mainly EAGAIN) we retry with a plain
+	   FUTEX_WAKE.  The general error test also covers running on old
+	   kernels that do not implement FUTEX_CMP_REQUEUE.  */
+ cmpq $-4095, %rax
+ jae 9f
+
+10: xorl %eax, %eax
+ retq
+
+ .align 16
+ /* Unlock. */
+4: LOCK
+ decl cond_lock-cond_futex(%rdi)
+ jne 5f
+
+6: xorl %eax, %eax
+ retq
+
+ /* Initial locking failed. */
+1:
+#if cond_lock != 0
+ addq $cond_lock, %rdi
+#endif
+ callq __lll_mutex_lock_wait
+#if cond_lock != 0
+ subq $cond_lock, %rdi
+#endif
+ jmp 2b
+
+ /* Unlock in loop requires wakeup. */
+5: addq $cond_lock-cond_futex, %rdi
+ callq __lll_mutex_unlock_wake
+ jmp 6b
+
+ /* Unlock in loop requires wakeup. */
+7: addq $cond_lock-cond_futex, %rdi
+ callq __lll_mutex_unlock_wake
+ subq $cond_lock-cond_futex, %rdi
+ jmp 8b
+
+9: /* The futex requeue functionality is not available. */
+ movl $0x7fffffff, %edx
+ movl $FUTEX_WAKE, %esi
+ movl $SYS_futex, %eax
+ syscall
+ jmp 10b
+ .size __pthread_cond_broadcast, .-__pthread_cond_broadcast
+weak_alias(__pthread_cond_broadcast, pthread_cond_broadcast)
+
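The interesting part of __pthread_cond_broadcast is the wake strategy: wake a single waiter and requeue the remaining ones directly onto the mutex futex, so they do not all stampede for the mutex at once.  In C the fast path and its fallback look roughly like the sketch below.  This is hedged: broadcast_wake and its parameters are invented for illustration, and the real code passes the dependent mutex it recorded in dep_mutex.

/* Sketch of the FUTEX_CMP_REQUEUE fast path with FUTEX_WAKE fallback.  */
#include <limits.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static void broadcast_wake(int *cond_futex, int expected_val, int *mutex_futex)
{
  /* Wake one waiter and move every other waiter onto the mutex futex,
     but only if *cond_futex still equals expected_val.  */
  long r = syscall(SYS_futex, cond_futex, FUTEX_CMP_REQUEUE,
                   1, INT_MAX, mutex_futex, expected_val);
  if (r == -1)
    /* EAGAIN (the value changed) or an old kernel without
       FUTEX_CMP_REQUEUE: just wake everybody.  */
    syscall(SYS_futex, cond_futex, FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
}

The `cmpq $-1, %r8 / je 9f` test in the assembly skips the requeue entirely when no usable dependent mutex was recorded and goes straight to the wake-all path.
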
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S
new file mode 100644
index 000000000..c7cc3ddd8
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S
@@ -0,0 +1,101 @@
+/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevelcond.h>
+#include <bits/kernel-features.h>
+
+#ifdef UP
+# define LOCK
+#else
+# define LOCK lock
+#endif
+
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+#define FUTEX_REQUEUE 3
+
+#define EINVAL 22
+
+
+ .text
+
+ /* int pthread_cond_signal (pthread_cond_t *cond) */
+ .globl __pthread_cond_signal
+ .type __pthread_cond_signal, @function
+ .align 16
+__pthread_cond_signal:
+
+ /* Get internal lock. */
+ movq %rdi, %r8
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if cond_lock == 0
+ cmpxchgl %esi, (%rdi)
+#else
+ cmpxchgl %esi, cond_lock(%rdi)
+#endif
+ jnz 1f
+
+2: addq $cond_futex, %rdi
+ movq total_seq(%r8), %rcx
+ cmpq wakeup_seq(%r8), %rcx
+ jbe 4f
+
+ /* Bump the wakeup number. */
+ addq $1, wakeup_seq(%r8)
+ addl $1, (%rdi)
+
+ /* Wake up one thread. */
+ movl $FUTEX_WAKE, %esi
+ movl $SYS_futex, %eax
+ movl $1, %edx
+ syscall
+
+ /* Unlock. */
+4: LOCK
+#if cond_lock == 0
+ decl (%r8)
+#else
+ decl cond_lock(%r8)
+#endif
+ jne 5f
+
+6: xorl %eax, %eax
+ retq
+
+ /* Initial locking failed. */
+1:
+#if cond_lock != 0
+ addq $cond_lock, %rdi
+#endif
+ callq __lll_mutex_lock_wait
+#if cond_lock != 0
+ subq $cond_lock, %rdi
+#endif
+ jmp 2b
+
+ /* Unlock in loop requires wakeup. */
+5:
+ movq %r8, %rdi
+ callq __lll_mutex_unlock_wake
+ jmp 6b
+ .size __pthread_cond_signal, .-__pthread_cond_signal
+weak_alias(__pthread_cond_signal, pthread_cond_signal)
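
Every routine in this directory starts and ends with the same two fragments: a LOCK cmpxchgl that tries to move the internal lock word from 0 to 1, falling into __lll_mutex_lock_wait on contention, and a LOCK decl on exit that calls __lll_mutex_unlock_wake only if the word did not drop back to 0.  The following is a hedged, self-contained C equivalent of that low-level lock, with the slow paths inlined as futex calls rather than the real helper routines.

/* Not the real lowlevellock implementation; a sketch of the 0/1/2 futex
   lock protocol the assembly fast paths rely on.  */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static void lll_lock_sketch(int *futex_word)
{
  /* Fast path: 0 -> 1, the LOCK cmpxchgl above.  */
  if (__sync_val_compare_and_swap(futex_word, 0, 1) != 0)
    /* Contended: mark the word 2 and sleep until it is released.  */
    while (__sync_lock_test_and_set(futex_word, 2) != 0)
      syscall(SYS_futex, futex_word, FUTEX_WAIT, 2, NULL, NULL, 0);
}

static void lll_unlock_sketch(int *futex_word)
{
  /* Fast path: LOCK decl; enter the kernel only if a waiter marked the
     word as contended.  */
  if (__sync_fetch_and_sub(futex_word, 1) != 1) {
    *futex_word = 0;
    syscall(SYS_futex, futex_word, FUTEX_WAKE, 1, NULL, NULL, 0);
  }
}

With that in mind, __pthread_cond_signal reads as: take the internal lock, and if total_seq shows an unserved waiter, bump wakeup_seq and the futex word, FUTEX_WAKE one waiter, then drop the internal lock.
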
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
new file mode 100644
index 000000000..f0dcdb750
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
@@ -0,0 +1,468 @@
+/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevelcond.h>
+#include <pthread-errnos.h>
+#include <tcb-offsets.h>
+
+#ifdef UP
+# define LOCK
+#else
+# define LOCK lock
+#endif
+
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+
+/* For the calculation see asm/vsyscall.h. */
+#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
+
+
+ .text
+
+/* int pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
+ const struct timespec *abstime) */
+ .globl __pthread_cond_timedwait
+ .type __pthread_cond_timedwait, @function
+ .align 16
+__pthread_cond_timedwait:
+.LSTARTCODE:
+ pushq %r12
+.Lpush_r12:
+ pushq %r13
+.Lpush_r13:
+ pushq %r14
+.Lpush_r14:
+#define FRAME_SIZE 80
+ subq $FRAME_SIZE, %rsp
+.Lsubq:
+
+ cmpq $1000000000, 8(%rdx)
+ movl $EINVAL, %eax
+ jae 18f
+
+ /* Stack frame:
+
+ rsp + 80
+ +--------------------------+
+ rsp + 48 | cleanup buffer |
+ +--------------------------+
+ rsp + 40 | old wake_seq value |
+ +--------------------------+
+ rsp + 24 | timeout value |
+ +--------------------------+
+ rsp + 16 | mutex pointer |
+ +--------------------------+
+ rsp + 8 | condvar pointer |
+ +--------------------------+
+ rsp + 4 | old broadcast_seq value |
+ +--------------------------+
+ rsp + 0 | old cancellation mode |
+ +--------------------------+
+ */
+
+ cmpq $-1, dep_mutex(%rdi)
+
+ /* Prepare structure passed to cancellation handler. */
+ movq %rdi, 8(%rsp)
+ movq %rsi, 16(%rsp)
+ movq %rdx, %r13
+
+ je 22f
+ movq %rsi, dep_mutex(%rdi)
+
+ /* Get internal lock. */
+22: movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if cond_lock == 0
+ cmpxchgl %esi, (%rdi)
+#else
+ cmpxchgl %esi, cond_lock(%rdi)
+#endif
+ jnz 1f
+
+ /* Unlock the mutex. */
+2: movq 16(%rsp), %rdi
+ xorl %esi, %esi
+ callq __pthread_mutex_unlock_usercnt
+
+ testl %eax, %eax
+ jne 16f
+
+ movq 8(%rsp), %rdi
+ incq total_seq(%rdi)
+ incl cond_futex(%rdi)
+ addl $(1 << clock_bits), cond_nwaiters(%rdi)
+
+ /* Install cancellation handler. */
+#ifdef __PIC__
+ leaq __condvar_cleanup(%rip), %rsi
+#else
+ leaq __condvar_cleanup, %rsi
+#endif
+ leaq 48(%rsp), %rdi
+ movq %rsp, %rdx
+ callq __pthread_cleanup_push
+
+ /* Get and store current wakeup_seq value. */
+ movq 8(%rsp), %rdi
+ movq wakeup_seq(%rdi), %r9
+ movl broadcast_seq(%rdi), %edx
+ movq %r9, 40(%rsp)
+ movl %edx, 4(%rsp)
+
+ /* Get the current time. */
+8:
+#ifdef __NR_clock_gettime
+ /* Get the clock number. Note that the field in the condvar
+ structure stores the number minus 1. */
+ movq 8(%rsp), %rdi
+ movl cond_nwaiters(%rdi), %edi
+ andl $((1 << clock_bits) - 1), %edi
+ /* Only clocks 0 and 1 are allowed so far. Both are handled in the
+ kernel. */
+ leaq 24(%rsp), %rsi
+ movl $__NR_clock_gettime, %eax
+ syscall
+# ifndef __ASSUME_POSIX_TIMERS
+ cmpq $-ENOSYS, %rax
+ je 19f
+# endif
+
+ /* Compute relative timeout. */
+ movq (%r13), %rcx
+ movq 8(%r13), %rdx
+ subq 24(%rsp), %rcx
+ subq 32(%rsp), %rdx
+#else
+ leaq 24(%rsp), %rdi
+ xorl %esi, %esi
+ movq $VSYSCALL_ADDR_vgettimeofday, %rax
+ callq *%rax
+
+ /* Compute relative timeout. */
+ movq 32(%rsp), %rax
+ movl $1000, %edx
+	mul	%rdx		/* Microseconds to nanoseconds.  */
+ movq (%r13), %rcx
+ movq 8(%r13), %rdx
+ subq 24(%rsp), %rcx
+ subq %rax, %rdx
+#endif
+ jns 12f
+ addq $1000000000, %rdx
+ decq %rcx
+12: testq %rcx, %rcx
+ movq 8(%rsp), %rdi
+ movq $-ETIMEDOUT, %r14
+ js 6f
+
+ /* Store relative timeout. */
+21: movq %rcx, 24(%rsp)
+ movq %rdx, 32(%rsp)
+
+ movl cond_futex(%rdi), %r12d
+
+ /* Unlock. */
+ LOCK
+#if cond_lock == 0
+ decl (%rdi)
+#else
+ decl cond_lock(%rdi)
+#endif
+ jne 3f
+
+4: callq __pthread_enable_asynccancel
+ movl %eax, (%rsp)
+
+ leaq 24(%rsp), %r10
+#if FUTEX_WAIT == 0
+ xorl %esi, %esi
+#else
+ movl $FUTEX_WAIT, %esi
+#endif
+ movq %r12, %rdx
+ addq $cond_futex, %rdi
+ movl $SYS_futex, %eax
+ syscall
+ movq %rax, %r14
+
+ movl (%rsp), %edi
+ callq __pthread_disable_asynccancel
+
+ /* Lock. */
+ movq 8(%rsp), %rdi
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if cond_lock == 0
+ cmpxchgl %esi, (%rdi)
+#else
+ cmpxchgl %esi, cond_lock(%rdi)
+#endif
+ jne 5f
+
+6: movl broadcast_seq(%rdi), %edx
+
+ movq woken_seq(%rdi), %rax
+
+ movq wakeup_seq(%rdi), %r9
+
+ cmpl 4(%rsp), %edx
+ jne 23f
+
+ cmpq 40(%rsp), %r9
+ jbe 15f
+
+ cmpq %rax, %r9
+ ja 9f
+
+15: cmpq $-ETIMEDOUT, %r14
+ jne 8b
+
+13: incq wakeup_seq(%rdi)
+ incl cond_futex(%rdi)
+ movl $ETIMEDOUT, %r14d
+ jmp 14f
+
+23: xorq %r14, %r14
+ jmp 24f
+
+9: xorq %r14, %r14
+14: incq woken_seq(%rdi)
+
+24: subl $(1 << clock_bits), cond_nwaiters(%rdi)
+
+ /* Wake up a thread which wants to destroy the condvar object. */
+ cmpq $0xffffffffffffffff, total_seq(%rdi)
+ jne 25f
+ movl cond_nwaiters(%rdi), %eax
+ andl $~((1 << clock_bits) - 1), %eax
+ jne 25f
+
+ addq $cond_nwaiters, %rdi
+ movl $SYS_futex, %eax
+ movl $FUTEX_WAKE, %esi
+ movl $1, %edx
+ syscall
+ subq $cond_nwaiters, %rdi
+
+25: LOCK
+#if cond_lock == 0
+ decl (%rdi)
+#else
+ decl cond_lock(%rdi)
+#endif
+ jne 10f
+
+ /* Remove cancellation handler. */
+11: movq 48+CLEANUP_PREV(%rsp), %rdx
+ movq %rdx, %fs:CLEANUP
+
+ movq 16(%rsp), %rdi
+ callq __pthread_mutex_cond_lock
+
+ testq %rax, %rax
+ cmoveq %r14, %rax
+
+18: addq $FRAME_SIZE, %rsp
+.Laddq:
+ popq %r14
+.Lpop_r14:
+ popq %r13
+.Lpop_r13:
+ popq %r12
+.Lpop_r12:
+
+ retq
+
+ /* Initial locking failed. */
+1:
+.LSbl1:
+#if cond_lock != 0
+ addq $cond_lock, %rdi
+#endif
+ callq __lll_mutex_lock_wait
+ jmp 2b
+
+ /* Unlock in loop requires wakeup. */
+3:
+#if cond_lock != 0
+ addq $cond_lock, %rdi
+#endif
+ callq __lll_mutex_unlock_wake
+ jmp 4b
+
+ /* Locking in loop failed. */
+5:
+#if cond_lock != 0
+ addq $cond_lock, %rdi
+#endif
+ callq __lll_mutex_lock_wait
+#if cond_lock != 0
+ subq $cond_lock, %rdi
+#endif
+ jmp 6b
+
+ /* Unlock after loop requires wakeup. */
+10:
+#if cond_lock != 0
+ addq $cond_lock, %rdi
+#endif
+ callq __lll_mutex_unlock_wake
+ jmp 11b
+
+ /* The initial unlocking of the mutex failed. */
+16: movq 8(%rsp), %rdi
+ movq %rax, (%rsp)
+ LOCK
+#if cond_lock == 0
+ decl (%rdi)
+#else
+ decl cond_lock(%rdi)
+#endif
+ jne 17f
+
+#if cond_lock != 0
+ addq $cond_lock, %rdi
+#endif
+ callq __lll_mutex_unlock_wake
+
+17: movq (%rsp), %rax
+ jmp 18b
+
+#if defined __NR_clock_gettime && !defined __ASSUME_POSIX_TIMERS
+ /* clock_gettime not available. */
+19: leaq 24(%rsp), %rdi
+ xorl %esi, %esi
+ movq $VSYSCALL_ADDR_vgettimeofday, %rax
+ callq *%rax
+
+ /* Compute relative timeout. */
+ movq 32(%rsp), %rax
+ movl $1000, %edx
+	mul	%rdx		/* Microseconds to nanoseconds.  */
+ movq (%r13), %rcx
+ movq 8(%r13), %rdx
+ subq 24(%rsp), %rcx
+ subq %rax, %rdx
+ jns 20f
+ addq $1000000000, %rdx
+ decq %rcx
+20: testq %rcx, %rcx
+ movq 8(%rsp), %rdi
+ movq $-ETIMEDOUT, %r14
+ js 6b
+ jmp 21b
+#endif
+.LENDCODE:
+ .size __pthread_cond_timedwait, .-__pthread_cond_timedwait
+weak_alias(__pthread_cond_timedwait, pthread_cond_timedwait)
+
+
+ .section .eh_frame,"a",@progbits
+.LSTARTFRAME:
+ .long .LENDCIE-.LSTARTCIE # Length of the CIE.
+.LSTARTCIE:
+ .long 0 # CIE ID.
+ .byte 1 # Version number.
+#ifdef SHARED
+ .string "zR" # NUL-terminated augmentation
+ # string.
+#else
+ .ascii "\0" # NUL-terminated augmentation
+ # string.
+#endif
+ .uleb128 1 # Code alignment factor.
+ .sleb128 -8 # Data alignment factor.
+ .byte 16 # Return address register
+ # column.
+#ifdef SHARED
+ .uleb128 1 # Augmentation value length.
+ .byte 0x1b # Encoding: DW_EH_PE_pcrel
+ # + DW_EH_PE_sdata4.
+#endif
+ .byte 0x0c # DW_CFA_def_cfa
+ .uleb128 7
+ .uleb128 8
+	.byte	0x90			# DW_CFA_offset, column 0x10
+ .uleb128 1
+ .align 8
+.LENDCIE:
+
+ .long .LENDFDE-.LSTARTFDE # Length of the FDE.
+.LSTARTFDE:
+ .long .LSTARTFDE-.LSTARTFRAME # CIE pointer.
+#ifdef SHARED
+ .long .LSTARTCODE-. # PC-relative start address
+ # of the code
+#else
+ .long .LSTARTCODE # Start address of the code.
+#endif
+ .long .LENDCODE-.LSTARTCODE # Length of the code.
+#ifdef SHARED
+ .uleb128 0 # No augmentation data.
+#endif
+ .byte 0x40+.Lpush_r12-.LSTARTCODE # DW_CFA_advance_loc+N
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 16
+ .byte 0x8c # DW_CFA_offset %r12
+ .uleb128 2
+ .byte 0x40+.Lpush_r13-.Lpush_r12 # DW_CFA_advance_loc+N
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 24
+ .byte 0x8d # DW_CFA_offset %r13
+ .uleb128 3
+ .byte 0x40+.Lpush_r14-.Lpush_r13 # DW_CFA_advance_loc+N
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 32
+ .byte 0x84 # DW_CFA_offset %r14
+ .uleb128 4
+ .byte 0x40+.Lsubq-.Lpush_r14 # DW_CFA_advance_loc+N
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 32+FRAME_SIZE
+ .byte 3 # DW_CFA_advance_loc2
+ .2byte .Laddq-.Lsubq
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 32
+ .byte 0x40+.Lpop_r14-.Laddq # DW_CFA_advance_loc+N
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 24
+ .byte 0xce # DW_CFA_restore %r14
+ .byte 0x40+.Lpop_r13-.Lpop_r14 # DW_CFA_advance_loc+N
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 16
+ .byte 0xcd # DW_CFA_restore %r13
+ .byte 0x40+.Lpop_r12-.Lpop_r13 # DW_CFA_advance_loc+N
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 8
+ .byte 0xcc # DW_CFA_restore %r12
+ .byte 0x40+.LSbl1-.Lpop_r12 # DW_CFA_advance_loc+N
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 32+FRAME_SIZE
+ .byte 0x8c # DW_CFA_offset %r12
+ .uleb128 2
+ .byte 0x8d # DW_CFA_offset %r13
+ .uleb128 3
+ .byte 0x84 # DW_CFA_offset %r14
+ .uleb128 4
+ .align 8
+.LENDFDE:
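
Both timeout paths above (clock_gettime and the vsyscall gettimeofday fallback) end in the same arithmetic: take the current time, subtract it from the caller's absolute deadline, borrow from the seconds field if the nanosecond difference went negative, and treat a negative second count as ETIMEDOUT.  A hedged C version of the gettimeofday variant follows; abstime_to_reltime is an invented name.

#include <errno.h>
#include <sys/time.h>
#include <time.h>

/* Turn an absolute CLOCK_REALTIME deadline into a relative timespec for
   FUTEX_WAIT.  Returns -ETIMEDOUT if the deadline has already passed.  */
static int abstime_to_reltime(const struct timespec *abstime,
                              struct timespec *rel)
{
  struct timeval now;
  gettimeofday(&now, NULL);

  rel->tv_sec  = abstime->tv_sec  - now.tv_sec;
  rel->tv_nsec = abstime->tv_nsec - now.tv_usec * 1000L;  /* us -> ns */
  if (rel->tv_nsec < 0) {                /* borrow from the seconds */
    rel->tv_nsec += 1000000000L;
    --rel->tv_sec;
  }
  return rel->tv_sec < 0 ? -ETIMEDOUT : 0;
}
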
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
new file mode 100644
index 000000000..544118eb7
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
@@ -0,0 +1,420 @@
+/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevelcond.h>
+#include <tcb-offsets.h>
+
+#ifdef UP
+# define LOCK
+#else
+# define LOCK lock
+#endif
+
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+
+
+ .text
+
+ .align 16
+ .type __condvar_cleanup, @function
+ .globl __condvar_cleanup
+ .hidden __condvar_cleanup
+__condvar_cleanup:
+ pushq %r12
+
+ /* Get internal lock. */
+ movq %rdi, %r8
+ movq 8(%rdi), %rdi
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if cond_lock == 0
+ cmpxchgl %esi, (%rdi)
+#else
+ cmpxchgl %esi, cond_lock(%rdi)
+#endif
+ jz 1f
+
+#if cond_lock != 0
+ addq $cond_lock, %rdi
+#endif
+ callq __lll_mutex_lock_wait
+#if cond_lock != 0
+ subq $cond_lock, %rdi
+#endif
+
+1: movl broadcast_seq(%rdi), %edx
+ cmpl 4(%r8), %edx
+ jne 3f
+
+ incq wakeup_seq(%rdi)
+ incq woken_seq(%rdi)
+ incl cond_futex(%rdi)
+
+3: subl $(1 << clock_bits), cond_nwaiters(%rdi)
+
+ /* Wake up a thread which wants to destroy the condvar object. */
+ xorq %r12, %r12
+ cmpq $0xffffffffffffffff, total_seq(%rdi)
+ jne 4f
+ movl cond_nwaiters(%rdi), %eax
+ andl $~((1 << clock_bits) - 1), %eax
+ jne 4f
+
+ addq $cond_nwaiters, %rdi
+ movl $SYS_futex, %eax
+ movl $FUTEX_WAKE, %esi
+ movl $1, %edx
+ syscall
+ subq $cond_nwaiters, %rdi
+ movl $1, %r12d
+
+4: LOCK
+#if cond_lock == 0
+ decl (%rdi)
+#else
+ decl cond_lock(%rdi)
+#endif
+ je 2f
+#if cond_lock != 0
+ addq $cond_lock, %rdi
+#endif
+ callq __lll_mutex_unlock_wake
+
+ /* Wake up all waiters to make sure no signal gets lost. */
+2: testq %r12, %r12
+ jnz 5f
+ addq $cond_futex, %rdi
+ movl $FUTEX_WAKE, %esi
+ movl $0x7fffffff, %edx
+ movl $SYS_futex, %eax
+ syscall
+
+5: movq 16(%r8), %rdi
+ callq __pthread_mutex_cond_lock
+
+ popq %r12
+
+ retq
+ .size __condvar_cleanup, .-__condvar_cleanup
+
+
+/* int pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex) */
+ .globl __pthread_cond_wait
+ .type __pthread_cond_wait, @function
+ .align 16
+__pthread_cond_wait:
+.LSTARTCODE:
+ pushq %r12
+.Lpush_r12:
+#define FRAME_SIZE 64
+ subq $FRAME_SIZE, %rsp
+.Lsubq:
+ /* Stack frame:
+
+ rsp + 64
+ +--------------------------+
+ rsp + 32 | cleanup buffer |
+ +--------------------------+
+ rsp + 24 | old wake_seq value |
+ +--------------------------+
+ rsp + 16 | mutex pointer |
+ +--------------------------+
+ rsp + 8 | condvar pointer |
+ +--------------------------+
+ rsp + 4 | old broadcast_seq value |
+ +--------------------------+
+ rsp + 0 | old cancellation mode |
+ +--------------------------+
+ */
+
+ cmpq $-1, dep_mutex(%rdi)
+
+ /* Prepare structure passed to cancellation handler. */
+ movq %rdi, 8(%rsp)
+ movq %rsi, 16(%rsp)
+
+ je 15f
+ movq %rsi, dep_mutex(%rdi)
+
+ /* Get internal lock. */
+15: movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if cond_lock == 0
+ cmpxchgl %esi, (%rdi)
+#else
+ cmpxchgl %esi, cond_lock(%rdi)
+#endif
+ jne 1f
+
+ /* Unlock the mutex. */
+2: movq 16(%rsp), %rdi
+ xorl %esi, %esi
+ callq __pthread_mutex_unlock_usercnt
+
+ testl %eax, %eax
+ jne 12f
+
+ movq 8(%rsp), %rdi
+ incq total_seq(%rdi)
+ incl cond_futex(%rdi)
+ addl $(1 << clock_bits), cond_nwaiters(%rdi)
+
+ /* Install cancellation handler. */
+#ifdef __PIC__
+ leaq __condvar_cleanup(%rip), %rsi
+#else
+ leaq __condvar_cleanup, %rsi
+#endif
+ leaq 32(%rsp), %rdi
+ movq %rsp, %rdx
+ callq __pthread_cleanup_push
+
+ /* Get and store current wakeup_seq value. */
+ movq 8(%rsp), %rdi
+ movq wakeup_seq(%rdi), %r9
+ movl broadcast_seq(%rdi), %edx
+ movq %r9, 24(%rsp)
+ movl %edx, 4(%rsp)
+
+ /* Unlock. */
+8: movl cond_futex(%rdi), %r12d
+ LOCK
+#if cond_lock == 0
+ decl (%rdi)
+#else
+ decl cond_lock(%rdi)
+#endif
+ jne 3f
+
+4: callq __pthread_enable_asynccancel
+ movl %eax, (%rsp)
+
+ movq 8(%rsp), %rdi
+ xorq %r10, %r10
+ movq %r12, %rdx
+ addq $cond_futex-cond_lock, %rdi
+ movl $SYS_futex, %eax
+#if FUTEX_WAIT == 0
+ xorl %esi, %esi
+#else
+ movl $FUTEX_WAIT, %esi
+#endif
+ syscall
+
+ movl (%rsp), %edi
+ callq __pthread_disable_asynccancel
+
+ /* Lock. */
+ movq 8(%rsp), %rdi
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if cond_lock == 0
+ cmpxchgl %esi, (%rdi)
+#else
+ cmpxchgl %esi, cond_lock(%rdi)
+#endif
+ jnz 5f
+
+6: movl broadcast_seq(%rdi), %edx
+
+ movq woken_seq(%rdi), %rax
+
+ movq wakeup_seq(%rdi), %r9
+
+ cmpl 4(%rsp), %edx
+ jne 16f
+
+ cmpq 24(%rsp), %r9
+ jbe 8b
+
+ cmpq %rax, %r9
+ jna 8b
+
+ incq woken_seq(%rdi)
+
+ /* Unlock */
+16: subl $(1 << clock_bits), cond_nwaiters(%rdi)
+
+ /* Wake up a thread which wants to destroy the condvar object. */
+ cmpq $0xffffffffffffffff, total_seq(%rdi)
+ jne 17f
+ movl cond_nwaiters(%rdi), %eax
+ andl $~((1 << clock_bits) - 1), %eax
+ jne 17f
+
+ addq $cond_nwaiters, %rdi
+ movl $SYS_futex, %eax
+ movl $FUTEX_WAKE, %esi
+ movl $1, %edx
+ syscall
+ subq $cond_nwaiters, %rdi
+
+17: LOCK
+#if cond_lock == 0
+ decl (%rdi)
+#else
+ decl cond_lock(%rdi)
+#endif
+ jne 10f
+
+ /* Remove cancellation handler. */
+11: movq 32+CLEANUP_PREV(%rsp), %rdx
+ movq %rdx, %fs:CLEANUP
+
+ movq 16(%rsp), %rdi
+ callq __pthread_mutex_cond_lock
+14: addq $FRAME_SIZE, %rsp
+.Laddq:
+
+ popq %r12
+.Lpop_r12:
+
+ /* We return the result of the mutex_lock operation. */
+ retq
+
+ /* Initial locking failed. */
+1:
+.LSbl1:
+#if cond_lock != 0
+ addq $cond_lock, %rdi
+#endif
+ callq __lll_mutex_lock_wait
+ jmp 2b
+
+ /* Unlock in loop requires wakeup. */
+3:
+#if cond_lock != 0
+ addq $cond_lock, %rdi
+#endif
+ callq __lll_mutex_unlock_wake
+ jmp 4b
+
+ /* Locking in loop failed. */
+5:
+#if cond_lock != 0
+ addq $cond_lock, %rdi
+#endif
+ callq __lll_mutex_lock_wait
+#if cond_lock != 0
+ subq $cond_lock, %rdi
+#endif
+ jmp 6b
+
+ /* Unlock after loop requires wakeup. */
+10:
+#if cond_lock != 0
+ addq $cond_lock, %rdi
+#endif
+ callq __lll_mutex_unlock_wake
+ jmp 11b
+
+ /* The initial unlocking of the mutex failed. */
+12: movq %rax, %r10
+ movq 8(%rsp), %rdi
+ LOCK
+#if cond_lock == 0
+ decl (%rdi)
+#else
+ decl cond_lock(%rdi)
+#endif
+ jne 13f
+
+#if cond_lock != 0
+ addq $cond_lock, %rdi
+#endif
+ callq __lll_mutex_unlock_wake
+
+13: movq %r10, %rax
+ jmp 14b
+.LENDCODE:
+ .size __pthread_cond_wait, .-__pthread_cond_wait
+weak_alias(__pthread_cond_wait, pthread_cond_wait)
+
+
+ .section .eh_frame,"a",@progbits
+.LSTARTFRAME:
+ .long .LENDCIE-.LSTARTCIE # Length of the CIE.
+.LSTARTCIE:
+ .long 0 # CIE ID.
+ .byte 1 # Version number.
+#ifdef SHARED
+ .string "zR" # NUL-terminated augmentation
+ # string.
+#else
+ .ascii "\0" # NUL-terminated augmentation
+ # string.
+#endif
+ .uleb128 1 # Code alignment factor.
+ .sleb128 -8 # Data alignment factor.
+ .byte 16 # Return address register
+ # column.
+#ifdef SHARED
+ .uleb128 1 # Augmentation value length.
+ .byte 0x1b # Encoding: DW_EH_PE_pcrel
+ # + DW_EH_PE_sdata4.
+#endif
+ .byte 0x0c # DW_CFA_def_cfa
+ .uleb128 7
+ .uleb128 8
+	.byte	0x90			# DW_CFA_offset, column 0x10
+ .uleb128 1
+ .align 8
+.LENDCIE:
+
+ .long .LENDFDE-.LSTARTFDE # Length of the FDE.
+.LSTARTFDE:
+ .long .LSTARTFDE-.LSTARTFRAME # CIE pointer.
+#ifdef SHARED
+ .long .LSTARTCODE-. # PC-relative start address
+ # of the code
+#else
+ .long .LSTARTCODE # Start address of the code.
+#endif
+ .long .LENDCODE-.LSTARTCODE # Length of the code.
+#ifdef SHARED
+ .uleb128 0 # No augmentation data.
+#endif
+ .byte 0x40+.Lpush_r12-.LSTARTCODE # DW_CFA_advance_loc+N
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 16
+ .byte 0x8c # DW_CFA_offset %r12
+ .uleb128 2
+ .byte 0x40+.Lsubq-.Lpush_r12 # DW_CFA_advance_loc+N
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 16+FRAME_SIZE
+ .byte 3 # DW_CFA_advance_loc2
+ .2byte .Laddq-.Lsubq
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 16
+ .byte 0x40+.Lpop_r12-.Laddq # DW_CFA_advance_loc+N
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 8
+ .byte 0xcc # DW_CFA_restore %r12
+ .byte 0x40+.LSbl1-.Lpop_r12 # DW_CFA_advance_loc+N
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 80
+ .byte 0x8c # DW_CFA_offset %r12
+ .uleb128 2
+ .align 8
+.LENDFDE:
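
The exit condition of the wait loop (labels 6 and 8 above, and the same test in pthread_cond_timedwait) is the part that is hardest to read in assembly.  In C it amounts to the predicate below, where the sequence counters are hypothetical stand-ins for the condvar fields addressed through the lowlevelcond.h offsets.

/* Sketch of the "may this woken thread leave the wait loop?" test.  */
struct condvar_sketch {
  unsigned long long wakeup_seq;   /* wakeups granted so far */
  unsigned long long woken_seq;    /* wakeups already consumed */
  unsigned int broadcast_seq;      /* bumped by every broadcast */
};

static int may_leave_wait(const struct condvar_sketch *cv,
                          unsigned long long wakeup_seq_at_entry,
                          unsigned int broadcast_seq_at_entry)
{
  if (cv->broadcast_seq != broadcast_seq_at_entry)
    return 1;                                 /* broadcast: everyone leaves */
  return cv->wakeup_seq > wakeup_seq_at_entry /* a new signal arrived */
      && cv->wakeup_seq > cv->woken_seq;      /* and it is still unconsumed */
}

When the predicate is true the thread increments woken_seq to consume the wakeup; otherwise it goes back to FUTEX_WAIT, which is why wakeup_seq and broadcast_seq are saved on the stack before the first sleep.
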
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S
new file mode 100644
index 000000000..d8bfa26c6
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S
@@ -0,0 +1,259 @@
+/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+
+#ifndef UP
+# define LOCK lock
+#else
+# define LOCK
+#endif
+
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+
+ .comm __fork_generation, 4, 4
+
+ .text
+
+
+ .globl __pthread_once
+ .type __pthread_once,@function
+ .align 16
+__pthread_once:
+.LSTARTCODE:
+ testl $2, (%rdi)
+ jz 1f
+ xorl %eax, %eax
+ retq
+
+ /* Preserve the function pointer. */
+1: pushq %rsi
+.Lpush_rsi:
+ xorq %r10, %r10
+
+ /* Not yet initialized or initialization in progress.
+ Get the fork generation counter now. */
+6: movl (%rdi), %eax
+
+5: movl %eax, %edx
+
+ testl $2, %eax
+ jnz 4f
+
+ andl $3, %edx
+ orl __fork_generation(%rip), %edx
+ orl $1, %edx
+
+ LOCK
+ cmpxchgl %edx, (%rdi)
+ jnz 5b
+
+ /* Check whether another thread already runs the initializer. */
+ testl $1, %eax
+ jz 3f /* No -> do it. */
+
+ /* Check whether the initializer execution was interrupted
+ by a fork. */
+ xorl %edx, %eax
+ testl $0xfffffffc, %eax
+	jnz	3f	/* Different fork generation -> run initializer.  */
+
+ /* Somebody else got here first. Wait. */
+#if FUTEX_WAIT == 0
+ xorl %esi, %esi
+#else
+ movl $FUTEX_WAIT, %esi
+#endif
+ movl $SYS_futex, %eax
+ syscall
+ jmp 6b
+
+ /* Preserve the pointer to the control variable. */
+3: pushq %rdi
+.Lpush_rdi:
+
+.LcleanupSTART:
+ callq *8(%rsp)
+.LcleanupEND:
+
+ /* Get the control variable address back. */
+ popq %rdi
+.Lpop_rdi:
+
+	/* Successful run of the initializer.  Signal that we are done.  */
+ LOCK
+ incl (%rdi)
+
+ /* Wake up all other threads. */
+ movl $0x7fffffff, %edx
+ movl $FUTEX_WAKE, %esi
+ movl $SYS_futex, %eax
+ syscall
+
+4: addq $8, %rsp
+.Ladd:
+ xorl %eax, %eax
+ retq
+
+ .size __pthread_once,.-__pthread_once
+
+
+ .globl __pthread_once_internal
+__pthread_once_internal = __pthread_once
+
+ .globl pthread_once
+pthread_once = __pthread_once
+
+
+ .type clear_once_control,@function
+ .align 16
+clear_once_control:
+ movq (%rsp), %rdi
+ movq %rax, %r8
+ movl $0, (%rdi)
+
+ movl $0x7fffffff, %edx
+ movl $FUTEX_WAKE, %esi
+ movl $SYS_futex, %eax
+ syscall
+
+ movq %r8, %rdi
+.LcallUR:
+ call _Unwind_Resume@PLT
+ hlt
+.LENDCODE:
+ .size clear_once_control,.-clear_once_control
+
+
+ .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+ .byte 0xff # @LPStart format (omit)
+ .byte 0xff # @TType format (omit)
+ .byte 0x01 # call-site format
+ # DW_EH_PE_uleb128
+ .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+ .uleb128 .LcleanupSTART-.LSTARTCODE
+ .uleb128 .LcleanupEND-.LcleanupSTART
+ .uleb128 clear_once_control-.LSTARTCODE
+ .uleb128 0
+ .uleb128 .LcallUR-.LSTARTCODE
+ .uleb128 .LENDCODE-.LcallUR
+ .uleb128 0
+ .uleb128 0
+.Lcstend:
+
+
+ .section .eh_frame,"a",@progbits
+.LSTARTFRAME:
+ .long .LENDCIE-.LSTARTCIE # Length of the CIE.
+.LSTARTCIE:
+ .long 0 # CIE ID.
+ .byte 1 # Version number.
+#ifdef SHARED
+ .string "zPLR" # NUL-terminated augmentation
+ # string.
+#else
+ .string "zPL" # NUL-terminated augmentation
+ # string.
+#endif
+ .uleb128 1 # Code alignment factor.
+ .sleb128 -8 # Data alignment factor.
+ .byte 16 # Return address register
+ # column.
+#ifdef SHARED
+ .uleb128 7 # Augmentation value length.
+ .byte 0x9b # Personality: DW_EH_PE_pcrel
+ # + DW_EH_PE_sdata4
+ # + DW_EH_PE_indirect
+ .long DW.ref.__gcc_personality_v0-.
+ .byte 0x1b # LSDA Encoding: DW_EH_PE_pcrel
+ # + DW_EH_PE_sdata4.
+ .byte 0x1b # FDE Encoding: DW_EH_PE_pcrel
+ # + DW_EH_PE_sdata4.
+#else
+ .uleb128 10 # Augmentation value length.
+ .byte 0x0 # Personality: absolute
+ .quad __gcc_personality_v0
+ .byte 0x0 # LSDA Encoding: absolute
+#endif
+ .byte 0x0c # DW_CFA_def_cfa
+ .uleb128 7
+ .uleb128 8
+ .byte 0x90 # DW_CFA_offset, column 0x10
+ .uleb128 1
+ .align 8
+.LENDCIE:
+
+ .long .LENDFDE-.LSTARTFDE # Length of the FDE.
+.LSTARTFDE:
+ .long .LSTARTFDE-.LSTARTFRAME # CIE pointer.
+#ifdef SHARED
+ .long .LSTARTCODE-. # PC-relative start address
+ # of the code.
+ .long .LENDCODE-.LSTARTCODE # Length of the code.
+ .uleb128 4 # Augmentation size
+ .long .LexceptSTART-.
+#else
+ .quad .LSTARTCODE # Start address of the code.
+ .quad .LENDCODE-.LSTARTCODE # Length of the code.
+ .uleb128 8 # Augmentation size
+ .quad .LexceptSTART
+#endif
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lpush_rsi-.LSTARTCODE
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 16
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lpush_rdi-.Lpush_rsi
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 24
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lpop_rdi-.Lpush_rdi
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 16
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Ladd-.Lpop_rdi
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 8
+ .byte 4 # DW_CFA_advance_loc4
+ .long clear_once_control-.Ladd
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 24
+#if 0
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lpop_rdi2-clear_once_control
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 16
+#endif
+ .align 8
+.LENDFDE:
+
+
+#ifdef SHARED
+ .hidden DW.ref.__gcc_personality_v0
+ .weak DW.ref.__gcc_personality_v0
+ .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+ .align 8
+ .type DW.ref.__gcc_personality_v0, @object
+ .size DW.ref.__gcc_personality_v0, 8
+DW.ref.__gcc_personality_v0:
+ .quad __gcc_personality_v0
+#endif
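
The once-control word packs three things: bit 1 means the initializer has completed, bit 0 means it is running, and the remaining bits hold the fork generation so that a process forked while the initializer was running reruns it instead of waiting forever.  Ignoring the cancellation cleanup (clear_once_control above), the state machine is roughly the following C, with fork_generation_sketch standing in for __fork_generation.

#include <limits.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static unsigned int fork_generation_sketch;   /* bumped in the fork path */

static void pthread_once_sketch(int *once, void (*init_routine)(void))
{
  for (;;) {
    int old = *once;
    if (old & 2)
      return;                                  /* already initialized */
    int newval = (old & 3) | fork_generation_sketch | 1;
    if (__sync_val_compare_and_swap(once, old, newval) != old)
      continue;                                /* lost the race; reload */
    if ((old & 1) && ((old ^ newval) & ~3) == 0) {
      /* Same generation, initializer already running: sleep until the
         word changes, then re-evaluate.  */
      syscall(SYS_futex, once, FUTEX_WAIT, newval, NULL, NULL, 0);
      continue;
    }
    init_routine();
    __sync_fetch_and_add(once, 1);             /* ...|1 becomes ...|2 */
    syscall(SYS_futex, once, FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
    return;
  }
}

clear_once_control is the exception-region landing pad for the call to the initializer: if that call is cancelled, it resets the word to 0 and wakes all waiters so one of them can retry.
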
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S
new file mode 100644
index 000000000..d7543572a
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S
@@ -0,0 +1,177 @@
+/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevelrwlock.h>
+#include <pthread-errnos.h>
+#include <tcb-offsets.h>
+
+
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+
+#ifndef UP
+# define LOCK lock
+#else
+# define LOCK
+#endif
+
+
+ .text
+
+ .globl __pthread_rwlock_rdlock
+ .type __pthread_rwlock_rdlock,@function
+ .align 16
+__pthread_rwlock_rdlock:
+ xorq %r10, %r10
+
+ /* Get the lock. */
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if MUTEX == 0
+ cmpxchgl %esi, (%rdi)
+#else
+ cmpxchgl %esi, MUTEX(%rdi)
+#endif
+ jnz 1f
+
+2: movl WRITER(%rdi), %eax
+ testl %eax, %eax
+ jne 14f
+ cmpl $0, WRITERS_QUEUED(%rdi)
+ je 5f
+ cmpl $0, FLAGS(%rdi)
+ je 5f
+
+3: incl READERS_QUEUED(%rdi)
+ je 4f
+
+ movl READERS_WAKEUP(%rdi), %edx
+
+ LOCK
+#if MUTEX == 0
+ decl (%rdi)
+#else
+ decl MUTEX(%rdi)
+#endif
+ jne 10f
+
+11: addq $READERS_WAKEUP, %rdi
+#if FUTEX_WAIT == 0
+ xorl %esi, %esi
+#else
+ movl $FUTEX_WAIT, %esi
+#endif
+ movl $SYS_futex, %eax
+ syscall
+
+ subq $READERS_WAKEUP, %rdi
+
+ /* Reget the lock. */
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if MUTEX == 0
+ cmpxchgl %esi, (%rdi)
+#else
+ cmpxchgl %esi, MUTEX(%rdi)
+#endif
+ jnz 12f
+
+13: decl READERS_QUEUED(%rdi)
+ jmp 2b
+
+5: xorl %edx, %edx
+ incl NR_READERS(%rdi)
+ je 8f
+9: LOCK
+#if MUTEX == 0
+ decl (%rdi)
+#else
+ decl MUTEX(%rdi)
+#endif
+ jne 6f
+7:
+
+ movq %rdx, %rax
+ retq
+
+1:
+#if MUTEX != 0
+ addq $MUTEX, %rdi
+#endif
+ callq __lll_mutex_lock_wait
+#if MUTEX != 0
+ subq $MUTEX, %rdi
+#endif
+ jmp 2b
+
+14: cmpl %fs:TID, %eax
+ jne 3b
+ /* Deadlock detected. */
+ movl $EDEADLK, %edx
+ jmp 9b
+
+6:
+#if MUTEX != 0
+ addq $MUTEX, %rdi
+#endif
+ callq __lll_mutex_unlock_wake
+#if MUTEX != 0
+ subq $MUTEX, %rdi
+#endif
+ jmp 7b
+
+ /* Overflow. */
+8: decl NR_READERS(%rdi)
+ movl $EAGAIN, %edx
+ jmp 9b
+
+ /* Overflow. */
+4: decl READERS_QUEUED(%rdi)
+ movl $EAGAIN, %edx
+ jmp 9b
+
+10:
+#if MUTEX != 0
+ addq $MUTEX, %rdi
+#endif
+ callq __lll_mutex_unlock_wake
+#if MUTEX != 0
+ subq $MUTEX, %rdi
+#endif
+ jmp 11b
+
+12:
+#if MUTEX != 0
+ addq $MUTEX, %rdi
+#endif
+ callq __lll_mutex_lock_wait
+#if MUTEX != 0
+ subq $MUTEX, %rdi
+#endif
+ jmp 13b
+ .size __pthread_rwlock_rdlock,.-__pthread_rwlock_rdlock
+
+ .globl pthread_rwlock_rdlock
+pthread_rwlock_rdlock = __pthread_rwlock_rdlock
+
+ .globl __pthread_rwlock_rdlock_internal
+__pthread_rwlock_rdlock_internal = __pthread_rwlock_rdlock
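
For orientation, the reader path above corresponds to the following hedged C.  The struct mirrors the WRITER/NR_READERS/READERS_QUEUED/READERS_WAKEUP/FLAGS offsets of lowlevelrwlock.h under invented names, the internal lock is again shown as a simple spin lock instead of __lll_mutex_lock_wait, and the EAGAIN overflow branches are left out.

#include <errno.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

struct rwlock_sketch {
  int lock;                    /* internal low-level lock (MUTEX) */
  unsigned int writer;         /* TID of the write-lock owner, 0 if none */
  unsigned int nr_readers;
  unsigned int readers_queued;
  unsigned int writers_queued;
  int readers_wakeup;          /* futex blocked readers sleep on */
  int writers_wakeup;          /* futex blocked writers sleep on */
  int flags;                   /* nonzero: prefer queued writers */
};

static void ll_lock(int *m)   { while (__sync_lock_test_and_set(m, 1)) ; }
static void ll_unlock(int *m) { __sync_lock_release(m); }

int rdlock_sketch(struct rwlock_sketch *rw, unsigned int my_tid)
{
  ll_lock(&rw->lock);
  for (;;) {
    if (rw->writer == 0
        && (rw->writers_queued == 0 || rw->flags == 0)) {
      ++rw->nr_readers;                       /* got the read lock */
      ll_unlock(&rw->lock);
      return 0;
    }
    if (rw->writer == my_tid) {
      ll_unlock(&rw->lock);
      return EDEADLK;                         /* we hold it for writing */
    }
    ++rw->readers_queued;
    int wait_val = rw->readers_wakeup;
    ll_unlock(&rw->lock);
    syscall(SYS_futex, &rw->readers_wakeup, FUTEX_WAIT, wait_val,
            NULL, NULL, 0);
    ll_lock(&rw->lock);
    --rw->readers_queued;
  }
}
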
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S
new file mode 100644
index 000000000..f044842e0
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S
@@ -0,0 +1,220 @@
+/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevelrwlock.h>
+#include <pthread-errnos.h>
+#include <tcb-offsets.h>
+
+
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+
+/* For the calculation see asm/vsyscall.h. */
+#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
+
+
+#ifndef UP
+# define LOCK lock
+#else
+# define LOCK
+#endif
+
+
+ .text
+
+ .globl pthread_rwlock_timedrdlock
+ .type pthread_rwlock_timedrdlock,@function
+ .align 16
+pthread_rwlock_timedrdlock:
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ subq $16, %rsp
+
+ movq %rdi, %r12
+ movq %rsi, %r13
+
+ /* Get the lock. */
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if MUTEX == 0
+ cmpxchgl %esi, (%rdi)
+#else
+ cmpxchgl %esi, MUTEX(%rdi)
+#endif
+ jnz 1f
+
+2: movl WRITER(%r12), %eax
+ testl %eax, %eax
+ jne 14f
+ cmpl $0, WRITERS_QUEUED(%r12)
+ je 5f
+ cmpl $0, FLAGS(%r12)
+ je 5f
+
+ /* Check the value of the timeout parameter. */
+3: cmpq $1000000000, 8(%r13)
+ jae 19f
+
+ incl READERS_QUEUED(%r12)
+ je 4f
+
+ movl READERS_WAKEUP(%r12), %r14d
+
+ /* Unlock. */
+ LOCK
+#if MUTEX == 0
+ decl (%r12)
+#else
+ decl MUTEX(%r12)
+#endif
+ jne 10f
+
+ /* Get current time. */
+11: movq %rsp, %rdi
+ xorl %esi, %esi
+ movq $VSYSCALL_ADDR_vgettimeofday, %rax
+ callq *%rax
+
+ /* Compute relative timeout. */
+ movq 8(%rsp), %rax
+ movl $1000, %edi
+	mul	%rdi		/* Microseconds to nanoseconds.  */
+ movq (%r13), %rcx
+ movq 8(%r13), %rdi
+ subq (%rsp), %rcx
+ subq %rax, %rdi
+ jns 15f
+ addq $1000000000, %rdi
+ decq %rcx
+15: testq %rcx, %rcx
+ js 16f /* Time is already up. */
+
+ /* Futex call. */
+ movq %rcx, (%rsp) /* Store relative timeout. */
+ movq %rdi, 8(%rsp)
+
+#if FUTEX_WAIT == 0
+ xorl %esi, %esi
+#else
+ movl $FUTEX_WAIT, %esi
+#endif
+ movq %rsp, %r10
+ movl %r14d, %edx
+ leaq READERS_WAKEUP(%r12), %rdi
+ movl $SYS_futex, %eax
+ syscall
+ movq %rax, %rdx
+17:
+
+ /* Reget the lock. */
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if MUTEX == 0
+ cmpxchgl %esi, (%r12)
+#else
+ cmpxchgl %esi, MUTEX(%r12)
+#endif
+ jnz 12f
+
+13: decl READERS_QUEUED(%r12)
+ cmpq $-ETIMEDOUT, %rdx
+ jne 2b
+
+18: movl $ETIMEDOUT, %edx
+ jmp 9f
+
+
+5: xorl %edx, %edx
+ incl NR_READERS(%r12)
+ je 8f
+9: LOCK
+#if MUTEX == 0
+ decl (%r12)
+#else
+ decl MUTEX(%r12)
+#endif
+ jne 6f
+
+7: movq %rdx, %rax
+
+ addq $16, %rsp
+ popq %r14
+ popq %r13
+ popq %r12
+ retq
+
+1:
+#if MUTEX != 0
+ addq $MUTEX, %rdi
+#endif
+ callq __lll_mutex_lock_wait
+ jmp 2b
+
+14: cmpl %fs:TID, %eax
+ jne 3b
+ movl $EDEADLK, %edx
+ jmp 9b
+
+6:
+#if MUTEX == 0
+ movq %r12, %rdi
+#else
+	leaq	MUTEX(%r12), %rdi
+#endif
+ callq __lll_mutex_unlock_wake
+ jmp 7b
+
+ /* Overflow. */
+8: decl NR_READERS(%r12)
+ movl $EAGAIN, %edx
+ jmp 9b
+
+ /* Overflow. */
+4: decl READERS_QUEUED(%r12)
+ movl $EAGAIN, %edx
+ jmp 9b
+
+10:
+#if MUTEX == 0
+ movq %r12, %rdi
+#else
+ leaq MUTEX(%r12), %rdi
+#endif
+ callq __lll_mutex_unlock_wake
+ jmp 11b
+
+12:
+#if MUTEX == 0
+ movq %r12, %rdi
+#else
+ leaq MUTEX(%r12), %rdi
+#endif
+ callq __lll_mutex_lock_wait
+ jmp 13b
+
+16: movq $-ETIMEDOUT, %rdx
+ jmp 17b
+
+19: movl $EINVAL, %edx
+ jmp 9b
+ .size pthread_rwlock_timedrdlock,.-pthread_rwlock_timedrdlock
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S
new file mode 100644
index 000000000..b479da727
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S
@@ -0,0 +1,211 @@
+/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevelrwlock.h>
+#include <pthread-errnos.h>
+#include <tcb-offsets.h>
+
+
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+
+/* For the calculation see asm/vsyscall.h. */
+#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
+
+#ifndef UP
+# define LOCK lock
+#else
+# define LOCK
+#endif
+
+
+ .text
+
+ .globl pthread_rwlock_timedwrlock
+ .type pthread_rwlock_timedwrlock,@function
+ .align 16
+pthread_rwlock_timedwrlock:
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ subq $16, %rsp
+
+ movq %rdi, %r12
+ movq %rsi, %r13
+
+ /* Get the lock. */
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if MUTEX == 0
+ cmpxchgl %esi, (%rdi)
+#else
+ cmpxchgl %esi, MUTEX(%rdi)
+#endif
+ jnz 1f
+
+2: movl WRITER(%r12), %eax
+ testl %eax, %eax
+ jne 14f
+ cmpl $0, NR_READERS(%r12)
+ je 5f
+
+ /* Check the value of the timeout parameter. */
+3: cmpq $1000000000, 8(%r13)
+ jae 19f
+
+ incl WRITERS_QUEUED(%r12)
+ je 4f
+
+ movl WRITERS_WAKEUP(%r12), %r14d
+
+ LOCK
+#if MUTEX == 0
+ decl (%r12)
+#else
+ decl MUTEX(%r12)
+#endif
+ jne 10f
+
+ /* Get current time. */
+11: movq %rsp, %rdi
+ xorl %esi, %esi
+ movq $VSYSCALL_ADDR_vgettimeofday, %rax
+ callq *%rax
+
+ /* Compute relative timeout. */
+ movq 8(%rsp), %rax
+ movl $1000, %edi
+	mul	%rdi		/* Microseconds to nanoseconds.  */
+ movq (%r13), %rcx
+ movq 8(%r13), %rdi
+ subq (%rsp), %rcx
+ subq %rax, %rdi
+ jns 15f
+ addq $1000000000, %rdi
+ decq %rcx
+15: testq %rcx, %rcx
+ js 16f /* Time is already up. */
+
+ /* Futex call. */
+ movq %rcx, (%rsp) /* Store relative timeout. */
+ movq %rdi, 8(%rsp)
+
+#if FUTEX_WAIT == 0
+ xorl %esi, %esi
+#else
+ movl $FUTEX_WAIT, %esi
+#endif
+ movq %rsp, %r10
+ movl %r14d, %edx
+ leaq WRITERS_WAKEUP(%r12), %rdi
+ movl $SYS_futex, %eax
+ syscall
+ movq %rax, %rdx
+17:
+
+ /* Reget the lock. */
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if MUTEX == 0
+ cmpxchgl %esi, (%r12)
+#else
+ cmpxchgl %esi, MUTEX(%r12)
+#endif
+ jnz 12f
+
+13: decl WRITERS_QUEUED(%r12)
+ cmpq $-ETIMEDOUT, %rdx
+ jne 2b
+
+18: movl $ETIMEDOUT, %edx
+ jmp 9f
+
+
+5: xorl %edx, %edx
+ movl %fs:TID, %eax
+ movl %eax, WRITER(%r12)
+9: LOCK
+#if MUTEX == 0
+ decl (%r12)
+#else
+ decl MUTEX(%r12)
+#endif
+ jne 6f
+
+7: movq %rdx, %rax
+
+ addq $16, %rsp
+ popq %r14
+ popq %r13
+ popq %r12
+ retq
+
+1:
+#if MUTEX != 0
+ addq $MUTEX, %rdi
+#endif
+ callq __lll_mutex_lock_wait
+ jmp 2b
+
+14: cmpl %fs:TID, %eax
+ jne 3b
+20: movl $EDEADLK, %edx
+ jmp 9b
+
+6:
+#if MUTEX == 0
+ movq %r12, %rdi
+#else
+	leaq	MUTEX(%r12), %rdi
+#endif
+ callq __lll_mutex_unlock_wake
+ jmp 7b
+
+ /* Overflow. */
+4: decl WRITERS_QUEUED(%r12)
+ movl $EAGAIN, %edx
+ jmp 9b
+
+10:
+#if MUTEX == 0
+ movq %r12, %rdi
+#else
+ leaq MUTEX(%r12), %rdi
+#endif
+ callq __lll_mutex_unlock_wake
+ jmp 11b
+
+12:
+#if MUTEX == 0
+ movq %r12, %rdi
+#else
+ leaq MUTEX(%r12), %rdi
+#endif
+ callq __lll_mutex_lock_wait
+ jmp 13b
+
+16: movq $-ETIMEDOUT, %rdx
+ jmp 17b
+
+19: movl $EINVAL, %edx
+ jmp 9b
+ .size pthread_rwlock_timedwrlock,.-pthread_rwlock_timedwrlock
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S
new file mode 100644
index 000000000..a0f75226a
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S
@@ -0,0 +1,129 @@
+/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevelrwlock.h>
+
+
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+
+#ifndef UP
+# define LOCK lock
+#else
+# define LOCK
+#endif
+
+
+ .text
+
+ .globl __pthread_rwlock_unlock
+ .type __pthread_rwlock_unlock,@function
+ .align 16
+__pthread_rwlock_unlock:
+ /* Get the lock. */
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if MUTEX == 0
+ cmpxchgl %esi, (%rdi)
+#else
+ cmpxchgl %esi, MUTEX(%rdi)
+#endif
+ jnz 1f
+
+2: cmpl $0, WRITER(%rdi)
+ jne 5f
+ decl NR_READERS(%rdi)
+ jnz 6f
+
+5: movl $0, WRITER(%rdi)
+
+ movl $1, %esi
+ leaq WRITERS_WAKEUP(%rdi), %r10
+ movq %rsi, %rdx
+ cmpl $0, WRITERS_QUEUED(%rdi)
+ jne 0f
+
+	/* If no readers are waiting either, there is nothing to do.  */
+ cmpl $0, READERS_QUEUED(%rdi)
+ je 6f
+
+ movl $0x7fffffff, %edx
+ leaq READERS_WAKEUP(%rdi), %r10
+
+0: incl (%r10)
+ LOCK
+#if MUTEX == 0
+ decl (%rdi)
+#else
+ decl MUTEX(%rdi)
+#endif
+ jne 7f
+
+8: movl $SYS_futex, %eax
+ movq %r10, %rdi
+ syscall
+
+ xorl %eax, %eax
+ retq
+
+ .align 16
+6: LOCK
+#if MUTEX == 0
+ decl (%rdi)
+#else
+ decl MUTEX(%rdi)
+#endif
+ jne 3f
+
+4: xorl %eax, %eax
+ retq
+
+1:
+#if MUTEX != 0
+ addq $MUTEX, %rdi
+#endif
+ callq __lll_mutex_lock_wait
+#if MUTEX != 0
+ subq $MUTEX, %rdi
+#endif
+ jmp 2b
+
+3:
+#if MUTEX != 0
+ addq $MUTEX, %rdi
+#endif
+ callq __lll_mutex_unlock_wake
+ jmp 4b
+
+7:
+#if MUTEX != 0
+ addq $MUTEX, %rdi
+#endif
+ callq __lll_mutex_unlock_wake
+ jmp 8b
+
+ .size __pthread_rwlock_unlock,.-__pthread_rwlock_unlock
+
+ .globl pthread_rwlock_unlock
+pthread_rwlock_unlock = __pthread_rwlock_unlock
+
+ .globl __pthread_rwlock_unlock_internal
+__pthread_rwlock_unlock_internal = __pthread_rwlock_unlock
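
pthread_rwlock_unlock is where the writer preference becomes visible: when the last reader (or the writer) leaves, one queued writer is woken if any exists, and only otherwise are all queued readers released.  Reusing struct rwlock_sketch and the ll_lock/ll_unlock helpers from the rdlock sketch earlier, the logic is roughly:

#include <limits.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

void unlock_sketch(struct rwlock_sketch *rw)
{
  ll_lock(&rw->lock);
  if (rw->writer == 0 && --rw->nr_readers != 0) {
    ll_unlock(&rw->lock);                    /* other readers remain */
    return;
  }
  rw->writer = 0;
  int *wake_addr;
  int nwake;
  if (rw->writers_queued != 0) {
    wake_addr = &rw->writers_wakeup;         /* hand off to one writer */
    nwake = 1;
  } else if (rw->readers_queued != 0) {
    wake_addr = &rw->readers_wakeup;         /* or release all readers */
    nwake = INT_MAX;
  } else {
    ll_unlock(&rw->lock);
    return;                                  /* nobody is waiting */
  }
  ++*wake_addr;                              /* advance the wakeup counter */
  ll_unlock(&rw->lock);
  syscall(SYS_futex, wake_addr, FUTEX_WAKE, nwake, NULL, NULL, 0);
}
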
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S
new file mode 100644
index 000000000..39b54dc6b
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S
@@ -0,0 +1,165 @@
+/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevelrwlock.h>
+#include <pthread-errnos.h>
+#include <tcb-offsets.h>
+
+
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+
+#ifndef UP
+# define LOCK lock
+#else
+# define LOCK
+#endif
+
+
+ .text
+
+ .globl __pthread_rwlock_wrlock
+ .type __pthread_rwlock_wrlock,@function
+ .align 16
+__pthread_rwlock_wrlock:
+ xorq %r10, %r10
+
+ /* Get the lock. */
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if MUTEX == 0
+ cmpxchgl %esi, (%rdi)
+#else
+ cmpxchgl %esi, MUTEX(%rdi)
+#endif
+ jnz 1f
+
+2: movl WRITER(%rdi), %eax
+ testl %eax, %eax
+ jne 14f
+ cmpl $0, NR_READERS(%rdi)
+ je 5f
+
+3: incl WRITERS_QUEUED(%rdi)
+ je 4f
+
+ movl WRITERS_WAKEUP(%rdi), %edx
+
+ LOCK
+#if MUTEX == 0
+ decl (%rdi)
+#else
+ decl MUTEX(%rdi)
+#endif
+ jne 10f
+
+11: addq $WRITERS_WAKEUP, %rdi
+#if FUTEX_WAIT == 0
+ xorl %esi, %esi
+#else
+ movl $FUTEX_WAIT, %esi
+#endif
+ movl $SYS_futex, %eax
+ syscall
+
+ subq $WRITERS_WAKEUP, %rdi
+
+ /* Reget the lock. */
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if MUTEX == 0
+ cmpxchgl %esi, (%rdi)
+#else
+ cmpxchgl %esi, MUTEX(%rdi)
+#endif
+ jnz 12f
+
+13: decl WRITERS_QUEUED(%rdi)
+ jmp 2b
+
+5: xorl %edx, %edx
+ movl %fs:TID, %eax
+ movl %eax, WRITER(%rdi)
+9: LOCK
+#if MUTEX == 0
+ decl (%rdi)
+#else
+ decl MUTEX(%rdi)
+#endif
+ jne 6f
+7:
+
+ movq %rdx, %rax
+ retq
+
+1:
+#if MUTEX != 0
+ addq $MUTEX, %rdi
+#endif
+ callq __lll_mutex_lock_wait
+#if MUTEX != 0
+ subq $MUTEX, %rdi
+#endif
+ jmp 2b
+
+14: cmpl %fs:TID, %eax
+ jne 3b
+ movl $EDEADLK, %edx
+ jmp 9b
+
+6:
+#if MUTEX != 0
+ addq $MUTEX, %rdi
+#endif
+ callq __lll_mutex_unlock_wake
+ jmp 7b
+
+4: decl WRITERS_QUEUED(%rdi)
+ movl $EAGAIN, %edx
+ jmp 9b
+
+10:
+#if MUTEX != 0
+ addq $MUTEX, %rdi
+#endif
+ callq __lll_mutex_unlock_wake
+#if MUTEX != 0
+ subq $MUTEX, %rdi
+#endif
+ jmp 11b
+
+12:
+#if MUTEX != 0
+ addq $MUTEX, %rdi
+#endif
+ callq __lll_mutex_lock_wait
+#if MUTEX != 0
+ subq $MUTEX, %rdi
+#endif
+ jmp 13b
+ .size __pthread_rwlock_wrlock,.-__pthread_rwlock_wrlock
+
+ .globl pthread_rwlock_wrlock
+pthread_rwlock_wrlock = __pthread_rwlock_wrlock
+
+ .globl __pthread_rwlock_wrlock_internal
+__pthread_rwlock_wrlock_internal = __pthread_rwlock_wrlock
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_setaffinity.c b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_setaffinity.c
new file mode 100644
index 000000000..640d3044f
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_setaffinity.c
@@ -0,0 +1,14 @@
+#include <tls.h>
+
+#define RESET_VGETCPU_CACHE() \
+ do { \
+ asm volatile ("movl %0, %%fs:%P1\n\t" \
+ "movl %0, %%fs:%P2" \
+ : \
+ : "ir" (0), "i" (offsetof (struct pthread, \
+ header.vgetcpu_cache[0])), \
+ "i" (offsetof (struct pthread, \
+ header.vgetcpu_cache[1]))); \
+ } while (0)
+
+#include "../pthread_setaffinity.c"
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_spin_init.c b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_spin_init.c
new file mode 100644
index 000000000..483de8cac
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_spin_init.c
@@ -0,0 +1 @@
+#include <sysdeps/x86_64/pthread_spin_init.c>
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_spin_unlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_spin_unlock.S
new file mode 100644
index 000000000..e8e2ba262
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_spin_unlock.S
@@ -0,0 +1 @@
+#include <sysdeps/x86_64/pthread_spin_unlock.S>
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S
new file mode 100644
index 000000000..5c8a858ad
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S
@@ -0,0 +1,64 @@
+/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <pthread-errnos.h>
+
+#ifndef UP
+# define LOCK lock
+#else
+# define LOCK
+#endif
+
+#define FUTEX_WAKE 1
+
+
+ .text
+
+ .globl sem_post
+ .type sem_post,@function
+ .align 16
+sem_post:
+ movl $1, %edx
+ LOCK
+ xaddl %edx, (%rdi)
+
+ movl $SYS_futex, %eax
+ movl $FUTEX_WAKE, %esi
+ incl %edx
+ syscall
+
+ testq %rax, %rax
+ js 1f
+
+ xorl %eax, %eax
+ retq
+
+1:
+#if USE___THREAD
+ movq errno@gottpoff(%rip), %rdx
+ movl $EINVAL, %fs:(%rdx)
+#else
+ callq __errno_location@plt
+ movl $EINVAL, (%rax)
+#endif
+
+ orl $-1, %eax
+ retq
+ .size sem_post,.-sem_post
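
sem_post is the simplest of the semaphore routines: an atomic xadd bumps the count, and the new value is handed to FUTEX_WAKE (the `incl %edx` turns the xadd's old value into the new one).  A hedged C equivalent, with the semaphore reduced to a bare int:

#include <errno.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

int sem_post_sketch(int *sem)
{
  int old = __sync_fetch_and_add(sem, 1);            /* LOCK xaddl */
  if (syscall(SYS_futex, sem, FUTEX_WAKE, old + 1, NULL, NULL, 0) < 0) {
    errno = EINVAL;        /* the assembly reports EINVAL for any failure */
    return -1;
  }
  return 0;
}
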
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S
new file mode 100644
index 000000000..64e168099
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S
@@ -0,0 +1,174 @@
+/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <pthread-errnos.h>
+#include <tcb-offsets.h>
+#include <tls.h>
+
+#ifndef UP
+# define LOCK lock
+#else
+# define LOCK
+#endif
+
+/* For the calculation see asm/vsyscall.h. */
+#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
+
+
+ .text
+
+ .globl sem_timedwait
+ .type sem_timedwait,@function
+ .align 16
+ cfi_startproc
+sem_timedwait:
+ /* First check for cancellation. */
+ movl %fs:CANCELHANDLING, %eax
+ andl $0xfffffff9, %eax
+ cmpl $8, %eax
+ je 11f
+
+ movl (%rdi), %eax
+2: testl %eax, %eax
+ je 1f
+
+ leaq -1(%rax), %rdx
+ LOCK
+ cmpxchgl %edx, (%rdi)
+ jne 2b
+
+ xorl %eax, %eax
+ retq
+
+ /* Check whether the timeout value is valid. */
+1: pushq %r12
+ cfi_adjust_cfa_offset(8)
+ pushq %r13
+ cfi_adjust_cfa_offset(8)
+ pushq %r14
+ cfi_adjust_cfa_offset(8)
+ subq $24, %rsp
+ cfi_adjust_cfa_offset(24)
+
+ movq %rdi, %r12
+ cfi_offset(12, -16) /* %r12 */
+ movq %rsi, %r13
+ cfi_offset(13, -24) /* %r13 */
+
+ /* Check for invalid nanosecond field. */
+ cmpq $1000000000, 8(%r13)
+ movl $EINVAL, %r14d
+	cfi_offset(14, -32)		/* %r14  */
+ jae 6f
+
+7: call __pthread_enable_asynccancel
+ movl %eax, 16(%rsp)
+
+ xorl %esi, %esi
+ movq %rsp, %rdi
+ movq $VSYSCALL_ADDR_vgettimeofday, %rax
+ callq *%rax
+
+ /* Compute relative timeout. */
+ movq 8(%rsp), %rax
+ movl $1000, %edi
+	mul	%rdi		/* Microseconds to nanoseconds.  */
+ movq (%r13), %rdi
+ movq 8(%r13), %rsi
+ subq (%rsp), %rdi
+ subq %rax, %rsi
+ jns 5f
+ addq $1000000000, %rsi
+ decq %rdi
+5: testq %rdi, %rdi
+ movl $ETIMEDOUT, %r14d
+ js 6f /* Time is already up. */
+
+ movq %rdi, (%rsp) /* Store relative timeout. */
+ movq %rsi, 8(%rsp)
+
+ movq %rsp, %r10
+ movq %r12, %rdi
+ xorl %esi, %esi
+ movl $SYS_futex, %eax
+ xorl %edx, %edx
+ syscall
+ movq %rax, %r14
+
+ movl 16(%rsp), %edi
+ call __pthread_disable_asynccancel
+
+ testq %r14, %r14
+ je 9f
+ cmpq $-EWOULDBLOCK, %r14
+ jne 3f
+
+9: movl (%r12), %eax
+8: testl %eax, %eax
+ je 7b
+
+ leaq -1(%rax), %rcx
+ LOCK
+ cmpxchgl %ecx, (%r12)
+ jne 8b
+
+ xorl %eax, %eax
+10: addq $24, %rsp
+ cfi_adjust_cfa_offset(-24)
+ popq %r14
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(14)
+ popq %r13
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(13)
+ popq %r12
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(12)
+ retq
+
+ cfi_adjust_cfa_offset(48)
+ cfi_offset(12, -16) /* %r12 */
+ cfi_offset(13, -24) /* %r13 */
+ cfi_offset(14, -32) /* %r14 */
+3: negq %r14
+6:
+#if USE___THREAD
+ movq errno@gottpoff(%rip), %rdx
+ movl %r14d, %fs:(%rdx)
+#else
+ callq __errno_location@plt
+ movl %r14d, (%rax)
+#endif
+
+ orl $-1, %eax
+ jmp 10b
+ cfi_adjust_cfa_offset(-48)
+ cfi_restore(14)
+ cfi_restore(13)
+ cfi_restore(12)
+
+11: /* Canceled. */
+ movq $0xffffffffffffffff, %fs:RESULT
+ LOCK
+ orl $0x10, %fs:CANCELHANDLING
+ movq %fs:CLEANUP_JMP_BUF, %rdi
+ jmp HIDDEN_JUMPTARGET (__pthread_unwind)
+ cfi_endproc
+ .size sem_timedwait,.-sem_timedwait
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S
new file mode 100644
index 000000000..08edc390c
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S
@@ -0,0 +1,57 @@
+/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <pthread-errnos.h>
+
+#ifndef UP
+# define LOCK lock
+#else
+# define LOCK
+#endif
+
+ .text
+
+ .globl sem_trywait
+ .type sem_trywait,@function
+ .align 16
+sem_trywait:
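+	/* Try to decrement the semaphore count without ever blocking;
+	   if it is already zero, fail with EAGAIN below.  */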
+ movl (%rdi), %eax
+2: testl %eax, %eax
+ jz 1f
+
+ leaq -1(%rax), %rdx
+ LOCK
+ cmpxchgl %edx, (%rdi)
+ jne 2b
+
+ xorl %eax, %eax
+ retq
+
+1:
+#if USE___THREAD
+ movq errno@gottpoff(%rip), %rdx
+ movl $EAGAIN, %fs:(%rdx)
+#else
+ callq __errno_location@plt
+ movl $EAGAIN, (%rax)
+#endif
+ orl $-1, %eax
+ retq
+ .size sem_trywait,.-sem_trywait
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S
new file mode 100644
index 000000000..c2f94d47f
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S
@@ -0,0 +1,119 @@
+/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <pthread-errnos.h>
+#include <tcb-offsets.h>
+#include <tls.h>
+
+#ifndef UP
+# define LOCK lock
+#else
+# define LOCK
+#endif
+
+
+ .text
+
+ .globl sem_wait
+ .type sem_wait,@function
+ .align 16
+ cfi_startproc
+sem_wait:
+ /* First check for cancellation. */
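+	/* With the cancel-type and canceling bits masked out, the value
+	   equals just the CANCELED bit when cancellation is enabled, a
+	   cancel request is pending and the thread is not yet exiting;
+	   in that case act on the cancellation immediately.  */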
+ movl %fs:CANCELHANDLING, %eax
+ andl $0xfffffff9, %eax
+ cmpl $8, %eax
+ je 4f
+
+ pushq %r12
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(12, -16)
+ pushq %r13
+ cfi_adjust_cfa_offset(8)
+ movq %rdi, %r13
+ cfi_offset(13, -24)
+
+3: movl (%r13), %eax
+2: testl %eax, %eax
+ je 1f
+
+ leaq -1(%rax), %rdx
+ LOCK
+ cmpxchgl %edx, (%r13)
+ jne 2b
+ xorl %eax, %eax
+
+ popq %r13
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(13)
+ popq %r12
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(12)
+
+ retq
+
+ cfi_adjust_cfa_offset(16)
+ cfi_offset(12, -16)
+ cfi_offset(13, -24)
+1: call __pthread_enable_asynccancel
+ movl %eax, %r8d
+
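+	/* Block on the semaphore: futex (sem, FUTEX_WAIT, 0, NULL).
+	   %rsi = 0 is FUTEX_WAIT, %rdx = 0 is the expected value and
+	   %r10 = NULL means no timeout.  */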
+ xorq %r10, %r10
+ movl $SYS_futex, %eax
+ movq %r13, %rdi
+ movq %r10, %rsi
+ movq %r10, %rdx
+ syscall
+ movq %rax, %r12
+
+ movl %r8d, %edi
+ call __pthread_disable_asynccancel
+
+ testq %r12, %r12
+ je 3b
+ cmpq $-EWOULDBLOCK, %r12
+ je 3b
+ negq %r12
+#if USE___THREAD
+ movq errno@gottpoff(%rip), %rdx
+ movl %r12d, %fs:(%rdx)
+#else
+ callq __errno_location@plt
+ movl %r12d, (%rax)
+#endif
+ orl $-1, %eax
+
+ popq %r13
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(13)
+ popq %r12
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(12)
+
+ retq
+
+4: /* Canceled. */
+ movq $0xffffffffffffffff, %fs:RESULT
+ LOCK
+ orl $0x10, %fs:CANCELHANDLING
+ movq %fs:CLEANUP_JMP_BUF, %rdi
+ jmp HIDDEN_JUMPTARGET (__pthread_unwind)
+ cfi_endproc
+ .size sem_wait,.-sem_wait
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sysdep-cancel.h b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sysdep-cancel.h
new file mode 100644
index 000000000..1c93952d4
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sysdep-cancel.h
@@ -0,0 +1,111 @@
+/* Copyright (C) 2002-2006, 2009 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Jakub Jelinek <jakub@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <tls.h>
+#ifndef __ASSEMBLER__
+# include <pthreadP.h>
+#endif
+
+#if !defined NOT_IN_libc || defined IS_IN_libpthread || defined IS_IN_librt
+
+/* The code to disable cancellation depends on the fact that the called
+ functions are special. They don't modify registers other than %rax
+ and %r11 if they return. Therefore we don't have to preserve other
+ registers around these calls. */
+# undef PSEUDO
+# define PSEUDO(name, syscall_name, args) \
+ .text; \
+ ENTRY (name) \
+ SINGLE_THREAD_P; \
+ jne L(pseudo_cancel); \
+ .type __##syscall_name##_nocancel,@function; \
+ .globl __##syscall_name##_nocancel; \
+ __##syscall_name##_nocancel: \
+ DO_CALL (syscall_name, args); \
+ cmpq $-4095, %rax; \
+ jae SYSCALL_ERROR_LABEL; \
+ ret; \
+ .size __##syscall_name##_nocancel,.-__##syscall_name##_nocancel; \
+ L(pseudo_cancel): \
+ /* We always have to align the stack before calling a function. */ \
+ subq $8, %rsp; cfi_adjust_cfa_offset (8); \
+ CENABLE \
+ /* The return value from CENABLE is argument for CDISABLE. */ \
+ movq %rax, (%rsp); \
+ DO_CALL (syscall_name, args); \
+ movq (%rsp), %rdi; \
+ /* Save %rax since it's the error code from the syscall. */ \
+ movq %rax, %rdx; \
+ CDISABLE \
+ movq %rdx, %rax; \
+ addq $8,%rsp; cfi_adjust_cfa_offset (-8); \
+ cmpq $-4095, %rax; \
+ jae SYSCALL_ERROR_LABEL; \
+ L(pseudo_end):
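+/* Illustrative sketch, not generated output: PSEUDO (__libc_read, read, 3)
+   produces a fast path __read_nocancel that issues the syscall directly
+   when only one thread is running, and a slow path that brackets the same
+   syscall with CENABLE/CDISABLE so the blocking call acts as a
+   cancellation point.  */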
+
+
+# ifdef IS_IN_libpthread
+# define CENABLE call __pthread_enable_asynccancel;
+# define CDISABLE call __pthread_disable_asynccancel;
+# define __local_multiple_threads __pthread_multiple_threads
+# elif !defined NOT_IN_libc
+# define CENABLE call __libc_enable_asynccancel;
+# define CDISABLE call __libc_disable_asynccancel;
+# define __local_multiple_threads __libc_multiple_threads
+# elif defined IS_IN_librt
+# define CENABLE call __librt_enable_asynccancel;
+# define CDISABLE call __librt_disable_asynccancel;
+# else
+# error Unsupported library
+# endif
+
+# if defined IS_IN_libpthread || !defined NOT_IN_libc
+# ifndef __ASSEMBLER__
+extern int __local_multiple_threads attribute_hidden;
+# define SINGLE_THREAD_P \
+ __builtin_expect (__local_multiple_threads == 0, 1)
+# else
+# define SINGLE_THREAD_P cmpl $0, __local_multiple_threads(%rip)
+# endif
+
+# else
+
+# ifndef __ASSEMBLER__
+# define SINGLE_THREAD_P \
+ __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+ header.multiple_threads) == 0, 1)
+# else
+# define SINGLE_THREAD_P cmpl $0, %fs:MULTIPLE_THREADS_OFFSET
+# endif
+
+# endif
+
+#elif !defined __ASSEMBLER__
+
+# define SINGLE_THREAD_P (1)
+# define NO_CANCELLATION 1
+
+#endif
+
+#ifndef __ASSEMBLER__
+# define RTLD_SINGLE_THREAD_P \
+ __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+ header.multiple_threads) == 0, 1)
+#endif
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sysdep.h b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sysdep.h
new file mode 100644
index 000000000..fe92a8ab5
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sysdep.h
@@ -0,0 +1,259 @@
+/* Copyright (C) 2001-2005, 2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#ifndef _LINUX_X86_64_SYSDEP_H
+#define _LINUX_X86_64_SYSDEP_H 1
+
+/* There is some commonality. */
+#include <sys/syscall.h>
+#include <sysdeps/i386/sysdep.h>
+
+#ifdef IS_IN_rtld
+# include <dl-sysdep.h> /* Defines RTLD_PRIVATE_ERRNO. */
+#endif
+
+/* For Linux we can use the system call table in the header file
+ /usr/include/asm/unistd.h
+ of the kernel. But these symbols do not follow the SYS_* syntax
+ so we have to redefine the `SYS_ify' macro here. */
+#undef SYS_ify
+#define SYS_ify(syscall_name) __NR_##syscall_name
+
+/* This is a kludge to make syscalls.list find these under the names
+ pread and pwrite, since some kernel headers define those names
+ and some define the *64 names for the same system calls. */
+#if !defined __NR_pread && defined __NR_pread64
+# define __NR_pread __NR_pread64
+#endif
+#if !defined __NR_pwrite && defined __NR_pwrite64
+# define __NR_pwrite __NR_pwrite64
+#endif
+
+/* This helps with old kernel headers where __NR_semtimedop is not
+   available.  */
+#ifndef __NR_semtimedop
+# define __NR_semtimedop 220
+#endif
+
+
+#ifdef __ASSEMBLER__
+
+/* Linux uses a negative return value to indicate syscall errors,
+ unlike most Unices, which use the condition codes' carry flag.
+
+ Since version 2.1 the return value of a system call might be
+ negative even if the call succeeded. E.g., the `lseek' system call
+   might return a large offset.  Therefore we can no longer simply
+   test for < 0; instead we test for a real error by making sure the
+   value in %rax is a real error number.  Linus said he will make
+   sure that no syscall returns a value in -1 .. -4095 as a valid
+   result so we can safely test with -4095.  */
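+/* In C terms the check amounts to roughly the following (sketch only):
+
+       unsigned long ret = ... value returned by the syscall ...;
+       if (ret >= (unsigned long) -4095)
+	 { errno = -ret; ret = -1; }
+
+   i.e. any result in -4095 .. -1 is treated as a negated errno code.  */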
+
+/* We don't want the label for the error handler to be global when we define
+   it here.  */
+# ifdef __PIC__
+# define SYSCALL_ERROR_LABEL 0f
+# else
+# define SYSCALL_ERROR_LABEL syscall_error
+# endif
+
+# undef PSEUDO
+# define PSEUDO(name, syscall_name, args) \
+ .text; \
+ ENTRY (name) \
+ DO_CALL (syscall_name, args); \
+ cmpq $-4095, %rax; \
+ jae SYSCALL_ERROR_LABEL; \
+ L(pseudo_end):
+
+# undef PSEUDO_END
+# define PSEUDO_END(name) \
+ SYSCALL_ERROR_HANDLER \
+ END (name)
+
+# undef PSEUDO_NOERRNO
+# define PSEUDO_NOERRNO(name, syscall_name, args) \
+ .text; \
+ ENTRY (name) \
+ DO_CALL (syscall_name, args)
+
+# undef PSEUDO_END_NOERRNO
+# define PSEUDO_END_NOERRNO(name) \
+ END (name)
+
+# define ret_NOERRNO ret
+
+# undef PSEUDO_ERRVAL
+# define PSEUDO_ERRVAL(name, syscall_name, args) \
+ .text; \
+ ENTRY (name) \
+ DO_CALL (syscall_name, args); \
+ negq %rax
+
+# undef PSEUDO_END_ERRVAL
+# define PSEUDO_END_ERRVAL(name) \
+ END (name)
+
+# define ret_ERRVAL ret
+
+# ifndef __PIC__
+# define SYSCALL_ERROR_HANDLER /* Nothing here; code in sysdep.S is used. */
+# elif defined(RTLD_PRIVATE_ERRNO)
+# define SYSCALL_ERROR_HANDLER \
+0: \
+ leaq rtld_errno(%rip), %rcx; \
+ xorl %edx, %edx; \
+ subq %rax, %rdx; \
+ movl %edx, (%rcx); \
+ orq $-1, %rax; \
+ jmp L(pseudo_end);
+# elif USE___THREAD
+# ifndef NOT_IN_libc
+# define SYSCALL_ERROR_ERRNO __libc_errno
+# else
+# define SYSCALL_ERROR_ERRNO errno
+# endif
+# define SYSCALL_ERROR_HANDLER \
+0: \
+ movq SYSCALL_ERROR_ERRNO@GOTTPOFF(%rip), %rcx;\
+ xorl %edx, %edx; \
+ subq %rax, %rdx; \
+ movl %edx, %fs:(%rcx); \
+ orq $-1, %rax; \
+ jmp L(pseudo_end);
+# elif defined _LIBC_REENTRANT
+/* Store (- %rax) into errno through the GOT.
+ Note that errno occupies only 4 bytes. */
+# define SYSCALL_ERROR_HANDLER \
+0: \
+ xorl %edx, %edx; \
+ subq %rax, %rdx; \
+ pushq %rdx; \
+ cfi_adjust_cfa_offset(8); \
+ call __errno_location@PLT; \
+ popq %rdx; \
+ cfi_adjust_cfa_offset(-8); \
+ movl %edx, (%rax); \
+ orq $-1, %rax; \
+ jmp L(pseudo_end);
+
+/* A quick note: it is assumed that the call to `__errno_location' does
+ not modify the stack! */
+# else /* Not _LIBC_REENTRANT. */
+# define SYSCALL_ERROR_HANDLER \
+0:	movq errno@GOTPCREL(%rip), %rcx; \
+ xorl %edx, %edx; \
+ subq %rax, %rdx; \
+ movl %edx, (%rcx); \
+ orq $-1, %rax; \
+ jmp L(pseudo_end);
+# endif /* __PIC__ */
+
+/* The Linux/x86-64 kernel expects the system call parameters in
+ registers according to the following table:
+
+ syscall number rax
+ arg 1 rdi
+ arg 2 rsi
+ arg 3 rdx
+ arg 4 r10
+ arg 5 r8
+ arg 6 r9
+
+   Internally the Linux kernel uses and destroys these registers:
+ return address from
+ syscall rcx
+ eflags from syscall r11
+
+   Normal function calls, including calls to the system call stub
+   functions in libc, get the first six parameters passed in
+   registers; the seventh and later parameters are passed on the
+   stack.  The register use is as follows:
+
+ system call number in the DO_CALL macro
+ arg 1 rdi
+ arg 2 rsi
+ arg 3 rdx
+ arg 4 rcx
+ arg 5 r8
+ arg 6 r9
+
+ We have to take care that the stack is aligned to 16 bytes. When
+ called the stack is not aligned since the return address has just
+ been pushed.
+
+
+ Syscalls of more than 6 arguments are not supported. */
+
+# undef DO_CALL
+# define DO_CALL(syscall_name, args) \
+ DOARGS_##args \
+ movl $SYS_ify (syscall_name), %eax; \
+ syscall;
+
+# define DOARGS_0 /* nothing */
+# define DOARGS_1 /* nothing */
+# define DOARGS_2 /* nothing */
+# define DOARGS_3 /* nothing */
+# define DOARGS_4 movq %rcx, %r10;
+# define DOARGS_5 DOARGS_4
+# define DOARGS_6 DOARGS_5
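+/* For example (illustrative only), DO_CALL (wait4, 4) expands to
+       movq %rcx, %r10; movl $SYS_ify (wait4), %eax; syscall;
+   moving the fourth argument from the userland register %rcx into %r10,
+   since the syscall instruction itself clobbers %rcx.  */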
+
+#endif /* __ASSEMBLER__ */
+
+
+/* Pointer mangling support. */
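+/* PTR_MANGLE xors a stored code pointer with a secret guard value and
+   rotates it left by 17 bits; PTR_DEMANGLE applies the inverse
+   rotate/xor, so a pointer kept e.g. in a jmp_buf cannot be forged
+   without knowing the guard.  */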
+#if defined NOT_IN_libc && defined IS_IN_rtld
+/* We cannot use the thread descriptor because in ld.so we use setjmp
+ earlier than the descriptor is initialized. */
+# ifdef __ASSEMBLER__
+# define PTR_MANGLE(reg) xorq __pointer_chk_guard_local(%rip), reg; \
+ rolq $17, reg
+# define PTR_DEMANGLE(reg) rorq $17, reg; \
+ xorq __pointer_chk_guard_local(%rip), reg
+# else
+# define PTR_MANGLE(reg) asm ("xorq __pointer_chk_guard_local(%%rip), %0\n" \
+ "rolq $17, %0" \
+ : "=r" (reg) : "0" (reg))
+# define PTR_DEMANGLE(reg) asm ("rorq $17, %0\n" \
+ "xorq __pointer_chk_guard_local(%%rip), %0" \
+ : "=r" (reg) : "0" (reg))
+# endif
+#else
+# ifdef __ASSEMBLER__
+# define PTR_MANGLE(reg) xorq %fs:POINTER_GUARD, reg; \
+ rolq $17, reg
+# define PTR_DEMANGLE(reg) rorq $17, reg; \
+ xorq %fs:POINTER_GUARD, reg
+# else
+# define PTR_MANGLE(var) asm ("xorq %%fs:%c2, %0\n" \
+ "rolq $17, %0" \
+ : "=r" (var) \
+ : "0" (var), \
+ "i" (offsetof (tcbhead_t, \
+ pointer_guard)))
+# define PTR_DEMANGLE(var) asm ("rorq $17, %0\n" \
+ "xorq %%fs:%c2, %0" \
+ : "=r" (var) \
+ : "0" (var), \
+ "i" (offsetof (tcbhead_t, \
+ pointer_guard)))
+# endif
+#endif
+
+#endif /* linux/x86_64/sysdep.h */
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/vfork.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/vfork.S
new file mode 100644
index 000000000..9a9912ca8
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/vfork.S
@@ -0,0 +1,43 @@
+/* Copyright (C) 2004 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+/* We want an #include_next, but we are the main source file.
+ So, #include ourselves and in that incarnation we can use #include_next. */
+#ifndef INCLUDED_SELF
+# define INCLUDED_SELF
+# include <vfork.S>
+#else
+
+# include <tcb-offsets.h>
+
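+/* The vfork child runs in the parent's address space, so the PID cached
+   in the TCB must not be trusted there.  SAVE_PID keeps the real value
+   in %esi and stores its negation (or 0x80000000 if the PID is zero) so
+   the borrowed value is recognizable; RESTORE_PID writes the saved PID
+   back when vfork returns a non-zero value, i.e. in the parent or on
+   error.  */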
+# define SAVE_PID \
+ movl %fs:PID, %esi; \
+ movl $0x80000000, %ecx; \
+ movl %esi, %edx; \
+ negl %edx; \
+ cmove %ecx, %edx; \
+ movl %edx, %fs:PID
+
+# define RESTORE_PID \
+ testq %rax, %rax; \
+ je 1f; \
+ movl %esi, %fs:PID; \
+1:
+
+# include_next <vfork.S>
+#endif