summaryrefslogtreecommitdiff
path: root/libpthread/linuxthreads
diff options
context:
space:
mode:
authorEric Andersen <andersen@codepoet.org>2003-02-27 18:13:05 +0000
committerEric Andersen <andersen@codepoet.org>2003-02-27 18:13:05 +0000
commit187dd78d7bd1c03fcf16e54a30314512d38e1a4a (patch)
tree9780638e5286b40da74a128c9f540a9ea720862f /libpthread/linuxthreads
parentd4d6e2c50565da18253cd0d6f3332484142b6587 (diff)
Major update for pthreads, based in large part on improvements
from glibc 2.3. This should make threads much more efficient. -Erik
Diffstat (limited to 'libpthread/linuxthreads')
-rw-r--r--libpthread/linuxthreads/condvar.c308
-rw-r--r--libpthread/linuxthreads/internals.h37
-rw-r--r--libpthread/linuxthreads/join.c18
-rw-r--r--libpthread/linuxthreads/manager.c100
-rw-r--r--libpthread/linuxthreads/mutex.c216
-rw-r--r--libpthread/linuxthreads/pthread.c416
-rw-r--r--libpthread/linuxthreads/restart.h26
-rw-r--r--libpthread/linuxthreads/semaphore.c159
-rw-r--r--libpthread/linuxthreads/semaphore.h36
-rw-r--r--libpthread/linuxthreads/signals.c3
-rw-r--r--libpthread/linuxthreads/spinlock.c616
-rw-r--r--libpthread/linuxthreads/spinlock.h132
-rw-r--r--libpthread/linuxthreads/sysdeps/alpha/pt-machine.h45
-rw-r--r--libpthread/linuxthreads/sysdeps/alpha/stackinfo.h28
-rw-r--r--libpthread/linuxthreads/sysdeps/arm/pt-machine.h19
-rw-r--r--libpthread/linuxthreads/sysdeps/arm/sigcontextinfo.h75
-rw-r--r--libpthread/linuxthreads/sysdeps/arm/stackinfo.h28
-rw-r--r--libpthread/linuxthreads/sysdeps/cris/pt-machine.h20
-rw-r--r--libpthread/linuxthreads/sysdeps/cris/stackinfo.h28
-rw-r--r--libpthread/linuxthreads/sysdeps/i386/i686/pt-machine.h38
-rw-r--r--libpthread/linuxthreads/sysdeps/i386/pt-machine.h27
-rw-r--r--libpthread/linuxthreads/sysdeps/i386/sigcontextinfo.h45
-rw-r--r--libpthread/linuxthreads/sysdeps/i386/stackinfo.h28
-rw-r--r--libpthread/linuxthreads/sysdeps/i386/tls.h183
-rw-r--r--libpthread/linuxthreads/sysdeps/i386/useldt.h213
-rw-r--r--libpthread/linuxthreads/sysdeps/m68k/pt-machine.h27
-rw-r--r--libpthread/linuxthreads/sysdeps/m68k/stackinfo.h28
-rw-r--r--libpthread/linuxthreads/sysdeps/mips/pt-machine.h111
-rw-r--r--libpthread/linuxthreads/sysdeps/mips/stackinfo.h28
-rw-r--r--libpthread/linuxthreads/sysdeps/powerpc/pt-machine.h82
-rw-r--r--libpthread/linuxthreads/sysdeps/powerpc/stackinfo.h28
-rw-r--r--libpthread/linuxthreads/sysdeps/pthread/bits/libc-lock.h327
-rw-r--r--libpthread/linuxthreads/sysdeps/pthread/bits/libc-tsd.h35
-rw-r--r--libpthread/linuxthreads/sysdeps/pthread/tls.h81
-rw-r--r--libpthread/linuxthreads/sysdeps/sh/pt-machine.h33
-rw-r--r--libpthread/linuxthreads/sysdeps/sh/stackinfo.h28
-rw-r--r--libpthread/linuxthreads/sysdeps/sh/tls.h115
-rw-r--r--libpthread/linuxthreads/sysdeps/sparc/stackinfo.h28
38 files changed, 3041 insertions, 754 deletions
diff --git a/libpthread/linuxthreads/condvar.c b/libpthread/linuxthreads/condvar.c
index 8bc114e3c..f9c46a331 100644
--- a/libpthread/linuxthreads/condvar.c
+++ b/libpthread/linuxthreads/condvar.c
@@ -25,22 +25,6 @@
#include "queue.h"
#include "restart.h"
-static int pthread_cond_timedwait_relative_old(pthread_cond_t *,
- pthread_mutex_t *, const struct timespec *);
-
-static int pthread_cond_timedwait_relative_new(pthread_cond_t *,
- pthread_mutex_t *, const struct timespec *);
-
-static int (*pthread_cond_tw_rel)(pthread_cond_t *, pthread_mutex_t *,
- const struct timespec *) = pthread_cond_timedwait_relative_old;
-
-/* initialize this module */
-void __pthread_init_condvar(int rt_sig_available)
-{
- if (rt_sig_available)
- pthread_cond_tw_rel = pthread_cond_timedwait_relative_new;
-}
-
int pthread_cond_init(pthread_cond_t *cond,
const pthread_condattr_t *cond_attr)
{
@@ -76,12 +60,20 @@ int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
volatile pthread_descr self = thread_self();
pthread_extricate_if extr;
int already_canceled = 0;
+ int spurious_wakeup_count;
+
+ /* Check whether the mutex is locked and owned by this thread. */
+ if (mutex->__m_kind != PTHREAD_MUTEX_TIMED_NP
+ && mutex->__m_kind != PTHREAD_MUTEX_ADAPTIVE_NP
+ && mutex->__m_owner != self)
+ return EINVAL;
/* Set up extrication interface */
extr.pu_object = cond;
extr.pu_extricate_func = cond_extricate_func;
/* Register extrication interface */
+ THREAD_SETMEM(self, p_condvar_avail, 0);
__pthread_set_own_extricate_if(self, &extr);
/* Atomically enqueue thread for waiting, but only if it is not
@@ -106,7 +98,21 @@ int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
pthread_mutex_unlock(mutex);
- suspend(self);
+ spurious_wakeup_count = 0;
+ while (1)
+ {
+ suspend(self);
+ if (THREAD_GETMEM(self, p_condvar_avail) == 0
+ && (THREAD_GETMEM(self, p_woken_by_cancel) == 0
+ || THREAD_GETMEM(self, p_cancelstate) != PTHREAD_CANCEL_ENABLE))
+ {
+ /* Count resumes that don't belong to us. */
+ spurious_wakeup_count++;
+ continue;
+ }
+ break;
+ }
+
__pthread_set_own_extricate_if(self, 0);
/* Check for cancellation again, to provide correct cancellation
@@ -119,31 +125,36 @@ int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
pthread_exit(PTHREAD_CANCELED);
}
+ /* Put back any resumes we caught that don't belong to us. */
+ while (spurious_wakeup_count--)
+ restart(self);
+
pthread_mutex_lock(mutex);
return 0;
}
-/* The following function is used on kernels that don't have rt signals.
- SIGUSR1 is used as the restart signal. The different code is needed
- because that ordinary signal does not queue. */
-
static int
-pthread_cond_timedwait_relative_old(pthread_cond_t *cond,
+pthread_cond_timedwait_relative(pthread_cond_t *cond,
pthread_mutex_t *mutex,
const struct timespec * abstime)
{
volatile pthread_descr self = thread_self();
- sigset_t unblock, initial_mask;
int already_canceled = 0;
- int was_signalled = 0;
- sigjmp_buf jmpbuf;
pthread_extricate_if extr;
+ int spurious_wakeup_count;
+
+ /* Check whether the mutex is locked and owned by this thread. */
+ if (mutex->__m_kind != PTHREAD_MUTEX_TIMED_NP
+ && mutex->__m_kind != PTHREAD_MUTEX_ADAPTIVE_NP
+ && mutex->__m_owner != self)
+ return EINVAL;
/* Set up extrication interface */
extr.pu_object = cond;
extr.pu_extricate_func = cond_extricate_func;
/* Register extrication interface */
+ THREAD_SETMEM(self, p_condvar_avail, 0);
__pthread_set_own_extricate_if(self, &extr);
/* Enqueue to wait on the condition and check for cancellation. */
@@ -162,202 +173,40 @@ pthread_cond_timedwait_relative_old(pthread_cond_t *cond,
pthread_mutex_unlock(mutex);
- if (atomic_decrement(&self->p_resume_count) == 0) {
- /* Set up a longjmp handler for the restart signal, unblock
- the signal and sleep. */
-
- if (sigsetjmp(jmpbuf, 1) == 0) {
- THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
- THREAD_SETMEM(self, p_signal, 0);
- /* Unblock the restart signal */
- sigemptyset(&unblock);
- sigaddset(&unblock, __pthread_sig_restart);
- sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);
-
- while (1) {
- struct timeval now;
- struct timespec reltime;
-
- /* Compute a time offset relative to now. */
- gettimeofday (&now, NULL);
- reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
- reltime.tv_sec = abstime->tv_sec - now.tv_sec;
- if (reltime.tv_nsec < 0) {
- reltime.tv_nsec += 1000000000;
- reltime.tv_sec -= 1;
- }
-
- /* Sleep for the required duration. If woken by a signal, resume waiting
- as required by Single Unix Specification. */
- if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
- break;
- }
-
- /* Block the restart signal again */
- sigprocmask(SIG_SETMASK, &initial_mask, NULL);
- was_signalled = 0;
- } else {
- was_signalled = 1;
- }
- THREAD_SETMEM(self, p_signal_jmp, NULL);
- }
-
- /* Now was_signalled is true if we exited the above code
- due to the delivery of a restart signal. In that case,
- we know we have been dequeued and resumed and that the
- resume count is balanced. Otherwise, there are some
- cases to consider. First, try to bump up the resume count
- back to zero. If it goes to 1, it means restart() was
- invoked on this thread. The signal must be consumed
- and the count bumped down and everything is cool.
- Otherwise, no restart was delivered yet, so we remove
- the thread from the queue. If this succeeds, it's a clear
- case of timeout. If we fail to remove from the queue, then we
- must wait for a restart. */
-
- if (!was_signalled) {
- if (atomic_increment(&self->p_resume_count) != -1) {
- __pthread_wait_for_restart_signal(self);
- atomic_decrement(&self->p_resume_count); /* should be zero now! */
- } else {
- int was_on_queue;
- __pthread_lock(&cond->__c_lock, self);
- was_on_queue = remove_from_queue(&cond->__c_waiting, self);
- __pthread_unlock(&cond->__c_lock);
-
- if (was_on_queue) {
- __pthread_set_own_extricate_if(self, 0);
- pthread_mutex_lock(mutex);
- return ETIMEDOUT;
- }
-
- suspend(self);
- }
- }
-
- __pthread_set_own_extricate_if(self, 0);
-
- /* The remaining logic is the same as in other cancellable waits,
- such as pthread_join sem_wait or pthread_cond wait. */
-
- if (THREAD_GETMEM(self, p_woken_by_cancel)
- && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
- THREAD_SETMEM(self, p_woken_by_cancel, 0);
- pthread_mutex_lock(mutex);
- pthread_exit(PTHREAD_CANCELED);
- }
-
- pthread_mutex_lock(mutex);
- return 0;
-}
-
-/* The following function is used on new (late 2.1 and 2.2 and higher) kernels
- that have rt signals which queue. */
-
-static int
-pthread_cond_timedwait_relative_new(pthread_cond_t *cond,
- pthread_mutex_t *mutex,
- const struct timespec * abstime)
-{
- volatile pthread_descr self = thread_self();
- sigset_t unblock, initial_mask;
- int already_canceled = 0;
- int was_signalled = 0;
- sigjmp_buf jmpbuf;
- pthread_extricate_if extr;
-
- /* Set up extrication interface */
- extr.pu_object = cond;
- extr.pu_extricate_func = cond_extricate_func;
-
- /* Register extrication interface */
- __pthread_set_own_extricate_if(self, &extr);
-
- /* Enqueue to wait on the condition and check for cancellation. */
- __pthread_lock(&cond->__c_lock, self);
- if (!(THREAD_GETMEM(self, p_canceled)
- && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
- enqueue(&cond->__c_waiting, self);
- else
- already_canceled = 1;
- __pthread_unlock(&cond->__c_lock);
+ spurious_wakeup_count = 0;
+ while (1)
+ {
+ if (!timedsuspend(self, abstime)) {
+ int was_on_queue;
- if (already_canceled) {
- __pthread_set_own_extricate_if(self, 0);
- pthread_exit(PTHREAD_CANCELED);
- }
+ /* __pthread_lock will queue back any spurious restarts that
+ may happen to it. */
- pthread_mutex_unlock(mutex);
+ __pthread_lock(&cond->__c_lock, self);
+ was_on_queue = remove_from_queue(&cond->__c_waiting, self);
+ __pthread_unlock(&cond->__c_lock);
- /* Set up a longjmp handler for the restart signal, unblock
- the signal and sleep. */
-
- if (sigsetjmp(jmpbuf, 1) == 0) {
- THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
- THREAD_SETMEM(self, p_signal, 0);
- /* Unblock the restart signal */
- sigemptyset(&unblock);
- sigaddset(&unblock, __pthread_sig_restart);
- sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);
-
- while (1) {
- struct timeval now;
- struct timespec reltime;
-
- /* Compute a time offset relative to now. */
- gettimeofday (&now, NULL);
- reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
- reltime.tv_sec = abstime->tv_sec - now.tv_sec;
- if (reltime.tv_nsec < 0) {
- reltime.tv_nsec += 1000000000;
- reltime.tv_sec -= 1;
+ if (was_on_queue) {
+ __pthread_set_own_extricate_if(self, 0);
+ pthread_mutex_lock(mutex);
+ return ETIMEDOUT;
}
- /* Sleep for the required duration. If woken by a signal,
- resume waiting as required by Single Unix Specification. */
- if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
- break;
+ /* Eat the outstanding restart() from the signaller */
+ suspend(self);
}
- /* Block the restart signal again */
- sigprocmask(SIG_SETMASK, &initial_mask, NULL);
- was_signalled = 0;
- } else {
- was_signalled = 1;
- }
- THREAD_SETMEM(self, p_signal_jmp, NULL);
-
- /* Now was_signalled is true if we exited the above code
- due to the delivery of a restart signal. In that case,
- everything is cool. We have been removed from the queue
- by the other thread, and consumed its signal.
-
- Otherwise we this thread woke up spontaneously, or due to a signal other
- than restart. The next thing to do is to try to remove the thread
- from the queue. This may fail due to a race against another thread
- trying to do the same. In the failed case, we know we were signalled,
- and we may also have to consume a restart signal. */
-
- if (!was_signalled) {
- int was_on_queue;
-
- /* __pthread_lock will queue back any spurious restarts that
- may happen to it. */
-
- __pthread_lock(&cond->__c_lock, self);
- was_on_queue = remove_from_queue(&cond->__c_waiting, self);
- __pthread_unlock(&cond->__c_lock);
-
- if (was_on_queue) {
- __pthread_set_own_extricate_if(self, 0);
- pthread_mutex_lock(mutex);
- return ETIMEDOUT;
+ if (THREAD_GETMEM(self, p_condvar_avail) == 0
+ && (THREAD_GETMEM(self, p_woken_by_cancel) == 0
+ || THREAD_GETMEM(self, p_cancelstate) != PTHREAD_CANCEL_ENABLE))
+ {
+ /* Count resumes that don't belong to us. */
+ spurious_wakeup_count++;
+ continue;
+ }
+ break;
}
- /* Eat the outstanding restart() from the signaller */
- suspend(self);
- }
-
__pthread_set_own_extricate_if(self, 0);
/* The remaining logic is the same as in other cancellable waits,
@@ -370,6 +219,10 @@ pthread_cond_timedwait_relative_new(pthread_cond_t *cond,
pthread_exit(PTHREAD_CANCELED);
}
+ /* Put back any resumes we caught that don't belong to us. */
+ while (spurious_wakeup_count--)
+ restart(self);
+
pthread_mutex_lock(mutex);
return 0;
}
@@ -378,7 +231,7 @@ int pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
const struct timespec * abstime)
{
/* Indirect call through pointer! */
- return pthread_cond_tw_rel(cond, mutex, abstime);
+ return pthread_cond_timedwait_relative(cond, mutex, abstime);
}
int pthread_cond_signal(pthread_cond_t *cond)
@@ -388,7 +241,11 @@ int pthread_cond_signal(pthread_cond_t *cond)
__pthread_lock(&cond->__c_lock, NULL);
th = dequeue(&cond->__c_waiting);
__pthread_unlock(&cond->__c_lock);
- if (th != NULL) restart(th);
+ if (th != NULL) {
+ th->p_condvar_avail = 1;
+ WRITE_MEMORY_BARRIER();
+ restart(th);
+ }
return 0;
}
@@ -402,7 +259,11 @@ int pthread_cond_broadcast(pthread_cond_t *cond)
cond->__c_waiting = NULL;
__pthread_unlock(&cond->__c_lock);
/* Now signal each process in the queue */
- while ((th = dequeue(&tosignal)) != NULL) restart(th);
+ while ((th = dequeue(&tosignal)) != NULL) {
+ th->p_condvar_avail = 1;
+ WRITE_MEMORY_BARRIER();
+ restart(th);
+ }
return 0;
}
@@ -418,19 +279,18 @@ int pthread_condattr_destroy(pthread_condattr_t *attr)
int pthread_condattr_getpshared (const pthread_condattr_t *attr, int *pshared)
{
- *pshared = PTHREAD_PROCESS_PRIVATE;
- return 0;
+ *pshared = PTHREAD_PROCESS_PRIVATE;
+ return 0;
}
int pthread_condattr_setpshared (pthread_condattr_t *attr, int pshared)
{
- if (pshared != PTHREAD_PROCESS_PRIVATE && pshared != PTHREAD_PROCESS_SHARED)
- return EINVAL;
+ if (pshared != PTHREAD_PROCESS_PRIVATE && pshared != PTHREAD_PROCESS_SHARED)
+ return EINVAL;
- /* For now it is not possible to share a conditional variable. */
- if (pshared != PTHREAD_PROCESS_PRIVATE)
- return ENOSYS;
+ /* For now it is not possible to share a conditional variable. */
+ if (pshared != PTHREAD_PROCESS_PRIVATE)
+ return ENOSYS;
- return 0;
+ return 0;
}
-
diff --git a/libpthread/linuxthreads/internals.h b/libpthread/linuxthreads/internals.h
index ffa52aff3..45439f363 100644
--- a/libpthread/linuxthreads/internals.h
+++ b/libpthread/linuxthreads/internals.h
@@ -29,6 +29,9 @@
#include "semaphore.h"
#include "../linuxthreads_db/thread_dbP.h"
+/* Pretend to be glibc 2.3 as far as gdb is concerned */
+#define VERSION "2.3"
+
#ifndef THREAD_GETMEM
# define THREAD_GETMEM(descr, member) descr->member
#endif
@@ -159,6 +162,8 @@ struct _pthread_descr_struct {
struct pthread_atomic p_resume_count; /* number of times restart() was
called on thread */
char p_woken_by_cancel; /* cancellation performed wakeup */
+ char p_condvar_avail; /* flag if conditional variable became available */
+ char p_sem_avail; /* flag if semaphore became available */
pthread_extricate_if *p_extricate; /* See above */
pthread_readlock_info *p_readlock_list; /* List of readlock info structs */
pthread_readlock_info *p_readlock_free; /* Free list of structs */
@@ -186,7 +191,7 @@ struct pthread_request {
pthread_descr req_thread; /* Thread doing the request */
enum { /* Request kind */
REQ_CREATE, REQ_FREE, REQ_PROCESS_EXIT, REQ_MAIN_THREAD_EXIT,
- REQ_POST, REQ_DEBUG
+ REQ_POST, REQ_DEBUG, REQ_KICK
} req_kind;
union { /* Arguments for request */
struct { /* For REQ_CREATE: */
@@ -337,6 +342,21 @@ static inline int invalid_handle(pthread_handle h, pthread_t id)
#define CURRENT_STACK_FRAME ({ char __csf; &__csf; })
#endif
+/* If MEMORY_BARRIER isn't defined in pt-machine.h, assume the
+ architecture doesn't need a memory barrier instruction (e.g. Intel
+ x86). Still we need the compiler to respect the barrier and emit
+ all outstanding operations which modify memory. Some architectures
+ distinguish between full, read and write barriers. */
+#ifndef MEMORY_BARRIER
+#define MEMORY_BARRIER() asm ("" : : : "memory")
+#endif
+#ifndef READ_MEMORY_BARRIER
+#define READ_MEMORY_BARRIER() MEMORY_BARRIER()
+#endif
+#ifndef WRITE_MEMORY_BARRIER
+#define WRITE_MEMORY_BARRIER() MEMORY_BARRIER()
+#endif
+
/* Recover thread descriptor for the current thread */
extern pthread_descr __pthread_find_self (void) __attribute__ ((const));
@@ -425,7 +445,6 @@ void __pthread_manager_sighandler(int sig);
void __pthread_reset_main_thread(void);
void __fresetlockfiles(void);
void __pthread_manager_adjust_prio(int thread_prio);
-void __pthread_set_own_extricate_if(pthread_descr self, pthread_extricate_if *peif);
void __pthread_initialize_minimal (void);
extern int __pthread_attr_setguardsize __P ((pthread_attr_t *__attr,
@@ -446,15 +465,15 @@ extern int __pthread_mutexattr_gettype __P ((__const pthread_mutexattr_t *__attr
int *__kind));
extern void __pthread_kill_other_threads_np __P ((void));
-void __pthread_restart_old(pthread_descr th);
-void __pthread_suspend_old(pthread_descr self);
-
-void __pthread_restart_new(pthread_descr th);
-void __pthread_suspend_new(pthread_descr self);
+extern void __pthread_restart_old(pthread_descr th);
+extern void __pthread_suspend_old(pthread_descr self);
+extern int __pthread_timedsuspend_old(pthread_descr self, const struct timespec *abs);
-void __pthread_wait_for_restart_signal(pthread_descr self);
+extern void __pthread_restart_new(pthread_descr th);
+extern void __pthread_suspend_new(pthread_descr self);
+extern int __pthread_timedsuspend_new(pthread_descr self, const struct timespec *abs);
-void __pthread_init_condvar(int rt_sig_available);
+extern void __pthread_wait_for_restart_signal(pthread_descr self);
/* Global pointers to old or new suspend functions */
diff --git a/libpthread/linuxthreads/join.c b/libpthread/linuxthreads/join.c
index ccb11b124..cc2dc4ddc 100644
--- a/libpthread/linuxthreads/join.c
+++ b/libpthread/linuxthreads/join.c
@@ -14,9 +14,12 @@
/* Thread termination and joining */
+#include <features.h>
+#define __USE_GNU
#include <errno.h>
#include <sched.h>
#include <unistd.h>
+#include <stdlib.h>
#include "pthread.h"
#include "internals.h"
#include "spinlock.h"
@@ -74,8 +77,13 @@ PDEBUG("joining = %p, pid=%d\n", joining, joining->p_pid);
if (self == __pthread_main_thread && __pthread_manager_request >= 0) {
request.req_thread = self;
request.req_kind = REQ_MAIN_THREAD_EXIT;
- __libc_write(__pthread_manager_request, (char *)&request, sizeof(request));
+ TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
+ (char *)&request, sizeof(request)));
suspend(self);
+ /* Main thread flushes stdio streams and runs atexit functions.
+ * It also calls a handler within LinuxThreads which sends a process exit
+ * request to the thread manager. */
+ exit(0);
}
/* Exit the process (but don't flush stdio streams, and don't run
atexit functions). */
@@ -168,8 +176,8 @@ PDEBUG("after suspend\n");
request.req_thread = self;
request.req_kind = REQ_FREE;
request.req_args.free.thread_id = thread_id;
- __libc_write(__pthread_manager_request,
- (char *) &request, sizeof(request));
+ TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
+ (char *) &request, sizeof(request)));
}
return 0;
}
@@ -206,8 +214,8 @@ int pthread_detach(pthread_t thread_id)
request.req_thread = thread_self();
request.req_kind = REQ_FREE;
request.req_args.free.thread_id = thread_id;
- __libc_write(__pthread_manager_request,
- (char *) &request, sizeof(request));
+ TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
+ (char *) &request, sizeof(request)));
}
return 0;
}
diff --git a/libpthread/linuxthreads/manager.c b/libpthread/linuxthreads/manager.c
index 1b513ca92..f1c9b93af 100644
--- a/libpthread/linuxthreads/manager.c
+++ b/libpthread/linuxthreads/manager.c
@@ -18,6 +18,8 @@
#define __getpid getpid
#define __getpagesize getpagesize
+#include <features.h>
+#define __USE_GNU
#include <errno.h>
#include <sched.h>
#include <stddef.h>
@@ -50,8 +52,8 @@
/* Array of active threads. Entry 0 is reserved for the initial thread. */
struct pthread_handle_struct __pthread_handles[PTHREAD_THREADS_MAX] =
-{ { LOCK_INITIALIZER, &__pthread_initial_thread, 0},
- { LOCK_INITIALIZER, &__pthread_manager_thread, 0}, /* All NULLs */ };
+{ { __LOCK_INITIALIZER, &__pthread_initial_thread, 0},
+ { __LOCK_INITIALIZER, &__pthread_manager_thread, 0}, /* All NULLs */ };
/* For debugging purposes put the maximum number of threads in a variable. */
const int __linuxthreads_pthread_threads_max = PTHREAD_THREADS_MAX;
@@ -120,7 +122,7 @@ int __pthread_manager(void *arg)
#else
struct pollfd ufd;
#endif
- sigset_t mask;
+ sigset_t manager_mask;
int n;
struct pthread_request request;
@@ -131,15 +133,19 @@ int __pthread_manager(void *arg)
/* Set the error variable. */
__pthread_manager_thread.p_errnop = &__pthread_manager_thread.p_errno;
__pthread_manager_thread.p_h_errnop = &__pthread_manager_thread.p_h_errno;
+
/* Block all signals except __pthread_sig_cancel and SIGTRAP */
- sigfillset(&mask);
- sigdelset(&mask, __pthread_sig_cancel); /* for thread termination */
- sigdelset(&mask, SIGTRAP); /* for debugging purposes */
- sigprocmask(SIG_SETMASK, &mask, NULL);
+ sigfillset(&manager_mask);
+ sigdelset(&manager_mask, __pthread_sig_cancel); /* for thread termination */
+ sigdelset(&manager_mask, SIGTRAP); /* for debugging purposes */
+ if (__pthread_threads_debug && __pthread_sig_debug > 0)
+ sigdelset(&manager_mask, __pthread_sig_debug);
+ sigprocmask(SIG_SETMASK, &manager_mask, NULL);
/* Raise our priority to match that of main thread */
__pthread_manager_adjust_prio(__pthread_main_thread->p_priority);
/* Synchronize debugging of the thread manager */
- n = __libc_read(reqfd, (char *)&request, sizeof(request));
+ n = TEMP_FAILURE_RETRY(__libc_read(reqfd, (char *)&request,
+ sizeof(request)));
ASSERT(n == sizeof(request) && request.req_kind == REQ_DEBUG);
#ifndef USE_SELECT
ufd.fd = reqfd;
@@ -201,17 +207,25 @@ PDEBUG("got REQ_FREE\n");
break;
case REQ_PROCESS_EXIT:
PDEBUG("got REQ_PROCESS_EXIT from %d, exit code = %d\n",
- request.req_thread, request.req_args.exit.code);
+ request.req_thread, request.req_args.exit.code);
pthread_handle_exit(request.req_thread,
request.req_args.exit.code);
break;
case REQ_MAIN_THREAD_EXIT:
PDEBUG("got REQ_MAIN_THREAD_EXIT\n");
main_thread_exiting = 1;
+ /* Reap children in case all other threads died and the signal handler
+ went off before we set main_thread_exiting to 1, and therefore did
+ not do REQ_KICK. */
+ pthread_reap_children();
+
if (__pthread_main_thread->p_nextlive == __pthread_main_thread) {
restart(__pthread_main_thread);
- return 0;
- }
+ /* The main thread will now call exit() which will trigger an
+ __on_exit handler, which in turn will send REQ_PROCESS_EXIT
+ to the thread manager. That, in case you are wondering, is how
+ the manager eventually terminates its loop here. */
+ }
break;
case REQ_POST:
PDEBUG("got REQ_POST\n");
@@ -221,10 +235,14 @@ PDEBUG("got REQ_POST\n");
PDEBUG("got REQ_DEBUG\n");
/* Make gdb aware of new thread and gdb will restart the
new thread when it is ready to handle the new thread. */
- if (__pthread_threads_debug && __pthread_sig_debug > 0)
+ if (__pthread_threads_debug && __pthread_sig_debug > 0) {
PDEBUG("about to call raise(__pthread_sig_debug)\n");
raise(__pthread_sig_debug);
- break;
+ }
+ case REQ_KICK:
+ /* This is just a prod to get the manager to reap some
+ threads right away, avoiding a potential delay at shutdown. */
+ break;
}
}
}
@@ -246,8 +264,9 @@ int __pthread_manager_event(void *arg)
}
/* Process creation */
-
-static int pthread_start_thread(void *arg)
+static int
+__attribute__ ((noreturn))
+pthread_start_thread(void *arg)
{
pthread_descr self = (pthread_descr) arg;
struct pthread_request request;
@@ -282,8 +301,8 @@ PDEBUG("\n");
if (__pthread_threads_debug && __pthread_sig_debug > 0) {
request.req_thread = self;
request.req_kind = REQ_DEBUG;
- __libc_write(__pthread_manager_request,
- (char *) &request, sizeof(request));
+ TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
+ (char *) &request, sizeof(request)));
suspend(self);
}
/* Run the thread code */
@@ -291,10 +310,11 @@ PDEBUG("\n");
p_start_args.arg));
/* Exit with the given return value */
pthread_exit(outcome);
- return 0;
}
-static int pthread_start_thread_event(void *arg)
+static int
+__attribute__ ((noreturn))
+pthread_start_thread_event(void *arg)
{
pthread_descr self = (pthread_descr) arg;
@@ -310,7 +330,7 @@ static int pthread_start_thread_event(void *arg)
__pthread_unlock (THREAD_GETMEM(self, p_lock));
/* Continue with the real function. */
- return pthread_start_thread (arg);
+ pthread_start_thread (arg);
}
static int pthread_allocate_stack(const pthread_attr_t *attr,
@@ -454,6 +474,7 @@ static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
char *guardaddr = NULL;
size_t guardsize = 0;
int pagesize = __getpagesize();
+ int saved_errno = 0;
/* First check whether we have to change the policy and if yes, whether
we can do this. Normally this should be done by examining the
@@ -549,8 +570,10 @@ static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
/* We have to report this event. */
pid = clone(pthread_start_thread_event, (void **) new_thread,
- CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
- __pthread_sig_cancel, new_thread);
+ CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
+ __pthread_sig_cancel, new_thread);
+
+ saved_errno = errno;
if (pid != -1)
{
/* Now fill in the information about the new thread in
@@ -577,9 +600,10 @@ static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
if (pid == 0)
{
PDEBUG("cloning new_thread = %p\n", new_thread);
- pid = clone(pthread_start_thread, (void **) new_thread,
- CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
- __pthread_sig_cancel, new_thread);
+ pid = clone(pthread_start_thread, (void **) new_thread,
+ CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
+ __pthread_sig_cancel, new_thread);
+ saved_errno = errno;
}
/* Check if cloning succeeded */
if (pid == -1) {
@@ -714,15 +738,15 @@ static void pthread_exited(pid_t pid)
/* If we have to signal this event do it now. */
if (th->p_report_events)
{
- /* See whether TD_DEATH is in any of the mask. */
+ /* See whether TD_REAP is in any of the mask. */
int idx = __td_eventword (TD_REAP);
uint32_t mask = __td_eventmask (TD_REAP);
if ((mask & (__pthread_threads_events.event_bits[idx]
| th->p_eventbuf.eventmask.event_bits[idx])) != 0)
{
- /* Yep, we have to signal the death. */
- th->p_eventbuf.eventnum = TD_DEATH;
+ /* Yep, we have to signal the reapage. */
+ th->p_eventbuf.eventnum = TD_REAP;
th->p_eventbuf.eventdata = th;
__pthread_last_event = th;
@@ -742,7 +766,7 @@ static void pthread_exited(pid_t pid)
if (main_thread_exiting &&
__pthread_main_thread->p_nextlive == __pthread_main_thread) {
restart(__pthread_main_thread);
- _exit(0);
+ /* Same logic as REQ_MAIN_THREAD_EXIT. */
}
}
@@ -837,7 +861,23 @@ static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode)
void __pthread_manager_sighandler(int sig)
{
- terminated_children = 1;
+ int kick_manager = terminated_children == 0 && main_thread_exiting;
+ terminated_children = 1;
+
+ /* If the main thread is terminating, kick the thread manager loop
+ each time some threads terminate. This eliminates a two second
+ shutdown delay caused by the thread manager sleeping in the
+ call to __poll(). Instead, the thread manager is kicked into
+ action, reaps the outstanding threads and resumes the main thread
+ so that it can complete the shutdown. */
+
+ if (kick_manager) {
+ struct pthread_request request;
+ request.req_thread = 0;
+ request.req_kind = REQ_KICK;
+ TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
+ (char *) &request, sizeof(request)));
+ }
}
/* Adjust priority of thread manager so that it always run at a priority
diff --git a/libpthread/linuxthreads/mutex.c b/libpthread/linuxthreads/mutex.c
index 2ab9e7e55..3c97ea7d6 100644
--- a/libpthread/linuxthreads/mutex.c
+++ b/libpthread/linuxthreads/mutex.c
@@ -12,41 +12,51 @@
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU Library General Public License for more details. */
-/* changes for uClibc: remove strong_alias'es and define the real symbol */
-
/* Mutexes */
-#include <features.h>
-#define __USE_GNU
+#include <bits/libc-lock.h>
#include <errno.h>
#include <sched.h>
#include <stddef.h>
+#include <limits.h>
#include "pthread.h"
#include "internals.h"
#include "spinlock.h"
#include "queue.h"
#include "restart.h"
-int pthread_mutex_init(pthread_mutex_t * mutex,
+int __pthread_mutex_init(pthread_mutex_t * mutex,
const pthread_mutexattr_t * mutex_attr)
{
__pthread_init_lock(&mutex->__m_lock);
mutex->__m_kind =
- mutex_attr == NULL ? PTHREAD_MUTEX_ADAPTIVE_NP : mutex_attr->__mutexkind;
+ mutex_attr == NULL ? PTHREAD_MUTEX_TIMED_NP : mutex_attr->__mutexkind;
mutex->__m_count = 0;
mutex->__m_owner = NULL;
return 0;
}
-//strong_alias (__pthread_mutex_init, pthread_mutex_init)
+strong_alias (__pthread_mutex_init, pthread_mutex_init)
-int pthread_mutex_destroy(pthread_mutex_t * mutex)
+int __pthread_mutex_destroy(pthread_mutex_t * mutex)
{
- if (mutex->__m_lock.__status != 0) return EBUSY;
- return 0;
+ switch (mutex->__m_kind) {
+ case PTHREAD_MUTEX_ADAPTIVE_NP:
+ case PTHREAD_MUTEX_RECURSIVE_NP:
+ if ((mutex->__m_lock.__status & 1) != 0)
+ return EBUSY;
+ return 0;
+ case PTHREAD_MUTEX_ERRORCHECK_NP:
+ case PTHREAD_MUTEX_TIMED_NP:
+ if (mutex->__m_lock.__status != 0)
+ return EBUSY;
+ return 0;
+ default:
+ return EINVAL;
+ }
}
-//strong_alias (__pthread_mutex_destroy, pthread_mutex_destroy)
+strong_alias (__pthread_mutex_destroy, pthread_mutex_destroy)
-int pthread_mutex_trylock(pthread_mutex_t * mutex)
+int __pthread_mutex_trylock(pthread_mutex_t * mutex)
{
pthread_descr self;
int retcode;
@@ -68,18 +78,21 @@ int pthread_mutex_trylock(pthread_mutex_t * mutex)
}
return retcode;
case PTHREAD_MUTEX_ERRORCHECK_NP:
- retcode = __pthread_trylock(&mutex->__m_lock);
+ retcode = __pthread_alt_trylock(&mutex->__m_lock);
if (retcode == 0) {
mutex->__m_owner = thread_self();
}
return retcode;
+ case PTHREAD_MUTEX_TIMED_NP:
+ retcode = __pthread_alt_trylock(&mutex->__m_lock);
+ return retcode;
default:
return EINVAL;
}
}
-//strong_alias (__pthread_mutex_trylock, pthread_mutex_trylock)
+strong_alias (__pthread_mutex_trylock, pthread_mutex_trylock)
-int pthread_mutex_lock(pthread_mutex_t * mutex)
+int __pthread_mutex_lock(pthread_mutex_t * mutex)
{
pthread_descr self;
@@ -100,22 +113,71 @@ int pthread_mutex_lock(pthread_mutex_t * mutex)
case PTHREAD_MUTEX_ERRORCHECK_NP:
self = thread_self();
if (mutex->__m_owner == self) return EDEADLK;
+ __pthread_alt_lock(&mutex->__m_lock, self);
+ mutex->__m_owner = self;
+ return 0;
+ case PTHREAD_MUTEX_TIMED_NP:
+ __pthread_alt_lock(&mutex->__m_lock, NULL);
+ return 0;
+ default:
+ return EINVAL;
+ }
+}
+strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
+
+int __pthread_mutex_timedlock (pthread_mutex_t *mutex,
+ const struct timespec *abstime)
+{
+ pthread_descr self;
+ int res;
+
+ if (__builtin_expect (abstime->tv_nsec, 0) < 0
+ || __builtin_expect (abstime->tv_nsec, 0) >= 1000000000)
+ return EINVAL;
+
+ switch(mutex->__m_kind) {
+ case PTHREAD_MUTEX_ADAPTIVE_NP:
+ __pthread_lock(&mutex->__m_lock, NULL);
+ return 0;
+ case PTHREAD_MUTEX_RECURSIVE_NP:
+ self = thread_self();
+ if (mutex->__m_owner == self) {
+ mutex->__m_count++;
+ return 0;
+ }
__pthread_lock(&mutex->__m_lock, self);
mutex->__m_owner = self;
+ mutex->__m_count = 0;
return 0;
+ case PTHREAD_MUTEX_ERRORCHECK_NP:
+ self = thread_self();
+ if (mutex->__m_owner == self) return EDEADLK;
+ res = __pthread_alt_timedlock(&mutex->__m_lock, self, abstime);
+ if (res != 0)
+ {
+ mutex->__m_owner = self;
+ return 0;
+ }
+ return ETIMEDOUT;
+ case PTHREAD_MUTEX_TIMED_NP:
+      /* Only this mutex type supports a timed lock. */
+ return (__pthread_alt_timedlock(&mutex->__m_lock, NULL, abstime)
+ ? 0 : ETIMEDOUT);
default:
return EINVAL;
}
}
-//strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
+strong_alias (__pthread_mutex_timedlock, pthread_mutex_timedlock)
-int pthread_mutex_unlock(pthread_mutex_t * mutex)
+int __pthread_mutex_unlock(pthread_mutex_t * mutex)
{
switch (mutex->__m_kind) {
case PTHREAD_MUTEX_ADAPTIVE_NP:
__pthread_unlock(&mutex->__m_lock);
return 0;
case PTHREAD_MUTEX_RECURSIVE_NP:
+ if (mutex->__m_owner != thread_self())
+ return EPERM;
if (mutex->__m_count > 0) {
mutex->__m_count--;
return 0;
@@ -127,38 +189,42 @@ int pthread_mutex_unlock(pthread_mutex_t * mutex)
if (mutex->__m_owner != thread_self() || mutex->__m_lock.__status == 0)
return EPERM;
mutex->__m_owner = NULL;
- __pthread_unlock(&mutex->__m_lock);
+ __pthread_alt_unlock(&mutex->__m_lock);
+ return 0;
+ case PTHREAD_MUTEX_TIMED_NP:
+ __pthread_alt_unlock(&mutex->__m_lock);
return 0;
default:
return EINVAL;
}
}
-//strong_alias (__pthread_mutex_unlock, pthread_mutex_unlock)
+strong_alias (__pthread_mutex_unlock, pthread_mutex_unlock)
-int pthread_mutexattr_init(pthread_mutexattr_t *attr)
+int __pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
- attr->__mutexkind = PTHREAD_MUTEX_ADAPTIVE_NP;
+ attr->__mutexkind = PTHREAD_MUTEX_TIMED_NP;
return 0;
}
-//strong_alias (__pthread_mutexattr_init, pthread_mutexattr_init)
+strong_alias (__pthread_mutexattr_init, pthread_mutexattr_init)
-int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
+int __pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
return 0;
}
-//strong_alias (__pthread_mutexattr_destroy, pthread_mutexattr_destroy)
+strong_alias (__pthread_mutexattr_destroy, pthread_mutexattr_destroy)
int __pthread_mutexattr_settype(pthread_mutexattr_t *attr, int kind)
{
if (kind != PTHREAD_MUTEX_ADAPTIVE_NP
&& kind != PTHREAD_MUTEX_RECURSIVE_NP
- && kind != PTHREAD_MUTEX_ERRORCHECK_NP)
+ && kind != PTHREAD_MUTEX_ERRORCHECK_NP
+ && kind != PTHREAD_MUTEX_TIMED_NP)
return EINVAL;
attr->__mutexkind = kind;
return 0;
}
weak_alias (__pthread_mutexattr_settype, pthread_mutexattr_settype)
-weak_alias ( __pthread_mutexattr_settype, __pthread_mutexattr_setkind_np)
+strong_alias ( __pthread_mutexattr_settype, __pthread_mutexattr_setkind_np)
weak_alias (__pthread_mutexattr_setkind_np, pthread_mutexattr_setkind_np)
int __pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *kind)
@@ -167,24 +233,27 @@ int __pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *kind)
return 0;
}
weak_alias (__pthread_mutexattr_gettype, pthread_mutexattr_gettype)
-weak_alias (__pthread_mutexattr_gettype, __pthread_mutexattr_getkind_np)
+strong_alias (__pthread_mutexattr_gettype, __pthread_mutexattr_getkind_np)
weak_alias (__pthread_mutexattr_getkind_np, pthread_mutexattr_getkind_np)
-int __pthread_mutexattr_getpshared (const pthread_mutexattr_t *attr, int *pshared)
+int __pthread_mutexattr_getpshared (const pthread_mutexattr_t *attr,
+ int *pshared)
{
- *pshared = PTHREAD_PROCESS_PRIVATE;
- return 0;
+ *pshared = PTHREAD_PROCESS_PRIVATE;
+ return 0;
}
weak_alias (__pthread_mutexattr_getpshared, pthread_mutexattr_getpshared)
int __pthread_mutexattr_setpshared (pthread_mutexattr_t *attr, int pshared)
{
- if (pshared != PTHREAD_PROCESS_PRIVATE && pshared != PTHREAD_PROCESS_SHARED)
- return EINVAL;
- /* For now it is not possible to shared a conditional variable. */
- if (pshared != PTHREAD_PROCESS_PRIVATE)
- return ENOSYS;
- return 0;
+ if (pshared != PTHREAD_PROCESS_PRIVATE && pshared != PTHREAD_PROCESS_SHARED)
+ return EINVAL;
+
+  /* For now it is not possible to share a condition variable.  */
+ if (pshared != PTHREAD_PROCESS_PRIVATE)
+ return ENOSYS;
+
+ return 0;
}
weak_alias (__pthread_mutexattr_setpshared, pthread_mutexattr_setpshared)
@@ -192,30 +261,97 @@ weak_alias (__pthread_mutexattr_setpshared, pthread_mutexattr_setpshared)
static pthread_mutex_t once_masterlock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t once_finished = PTHREAD_COND_INITIALIZER;
+static int fork_generation = 0; /* Child process increments this after fork. */
enum { NEVER = 0, IN_PROGRESS = 1, DONE = 2 };
+/* If a thread is canceled while calling the init_routine from
+   pthread_once, this handler will reset the once_control variable
+   to the NEVER state. */
+
+static void pthread_once_cancelhandler(void *arg)
+{
+ pthread_once_t *once_control = arg;
+
+ pthread_mutex_lock(&once_masterlock);
+ *once_control = NEVER;
+ pthread_mutex_unlock(&once_masterlock);
+ pthread_cond_broadcast(&once_finished);
+}
+
int __pthread_once(pthread_once_t * once_control, void (*init_routine)(void))
{
+ /* flag for doing the condition broadcast outside of mutex */
+ int state_changed;
+
/* Test without locking first for speed */
- if (*once_control == DONE) return 0;
+ if (*once_control == DONE) {
+ READ_MEMORY_BARRIER();
+ return 0;
+ }
/* Lock and test again */
+
+ state_changed = 0;
+
pthread_mutex_lock(&once_masterlock);
+
+ /* If this object was left in an IN_PROGRESS state in a parent
+ process (indicated by stale generation field), reset it to NEVER. */
+ if ((*once_control & 3) == IN_PROGRESS && (*once_control & ~3) != fork_generation)
+ *once_control = NEVER;
+
/* If init_routine is being called from another routine, wait until
it completes. */
- while (*once_control == IN_PROGRESS) {
+ while ((*once_control & 3) == IN_PROGRESS) {
pthread_cond_wait(&once_finished, &once_masterlock);
}
/* Here *once_control is stable and either NEVER or DONE. */
if (*once_control == NEVER) {
- *once_control = IN_PROGRESS;
+ *once_control = IN_PROGRESS | fork_generation;
pthread_mutex_unlock(&once_masterlock);
+ pthread_cleanup_push(pthread_once_cancelhandler, once_control);
init_routine();
+ pthread_cleanup_pop(0);
pthread_mutex_lock(&once_masterlock);
+ WRITE_MEMORY_BARRIER();
*once_control = DONE;
- pthread_cond_broadcast(&once_finished);
+ state_changed = 1;
}
pthread_mutex_unlock(&once_masterlock);
+
+ if (state_changed)
+ pthread_cond_broadcast(&once_finished);
+
return 0;
}
strong_alias (__pthread_once, pthread_once)
+
+/*
+ * Handle the state of the pthread_once mechanism across forks. The
+ * once_masterlock is acquired in the parent process prior to a fork to ensure
+ * that no thread is in the critical region protected by the lock. After the
+ * fork, the lock is released. In the child, the lock and the condition
+ * variable are simply reset. The child also increments its generation
+ * counter which lets pthread_once calls detect stale IN_PROGRESS states
+ * and reset them back to NEVER.
+ */
+
+void __pthread_once_fork_prepare(void)
+{
+ pthread_mutex_lock(&once_masterlock);
+}
+
+void __pthread_once_fork_parent(void)
+{
+ pthread_mutex_unlock(&once_masterlock);
+}
+
+void __pthread_once_fork_child(void)
+{
+ pthread_mutex_init(&once_masterlock, NULL);
+ pthread_cond_init(&once_finished, NULL);
+ if (fork_generation <= INT_MAX - 4)
+ fork_generation += 4; /* leave least significant two bits zero */
+ else
+ fork_generation = 0;
+}
diff --git a/libpthread/linuxthreads/pthread.c b/libpthread/linuxthreads/pthread.c
index 12ee2fd98..61f6b582a 100644
--- a/libpthread/linuxthreads/pthread.c
+++ b/libpthread/linuxthreads/pthread.c
@@ -16,6 +16,7 @@
#define __FORCE_GLIBC
#include <features.h>
+#define __USE_GNU
#include <errno.h>
#include <netdb.h> /* for h_errno */
#include <stddef.h>
@@ -90,8 +91,10 @@ struct _pthread_descr_struct __pthread_initial_thread = {
0, /* Always index 0 */
0, /* int p_report_events */
{{{0, }}, 0, NULL}, /* td_eventbuf_t p_eventbuf */
- ATOMIC_INITIALIZER, /* struct pthread_atomic p_resume_count */
+ __ATOMIC_INITIALIZER, /* struct pthread_atomic p_resume_count */
0, /* char p_woken_by_cancel */
+ 0, /* char p_condvar_avail */
+ 0, /* char p_sem_avail */
NULL, /* struct pthread_extricate_if *p_extricate */
NULL, /* pthread_readlock_info *p_readlock_list; */
NULL, /* pthread_readlock_info *p_readlock_free; */
@@ -140,8 +143,10 @@ struct _pthread_descr_struct __pthread_manager_thread = {
1, /* Always index 1 */
0, /* int p_report_events */
{{{0, }}, 0, NULL}, /* td_eventbuf_t p_eventbuf */
- ATOMIC_INITIALIZER, /* struct pthread_atomic p_resume_count */
+ __ATOMIC_INITIALIZER, /* struct pthread_atomic p_resume_count */
0, /* char p_woken_by_cancel */
+ 0, /* char p_condvar_avail */
+ 0, /* char p_sem_avail */
NULL, /* struct pthread_extricate_if *p_extricate */
NULL, /* pthread_readlock_info *p_readlock_list; */
NULL, /* pthread_readlock_info *p_readlock_free; */
@@ -189,45 +194,79 @@ int __pthread_exit_code = 0;
const int __pthread_threads_max = PTHREAD_THREADS_MAX;
const int __pthread_sizeof_handle = sizeof(struct pthread_handle_struct);
-const int __pthread_offsetof_descr = offsetof(struct pthread_handle_struct,
- h_descr);
+const int __pthread_offsetof_descr = offsetof(struct pthread_handle_struct, h_descr);
const int __pthread_offsetof_pid = offsetof(struct _pthread_descr_struct,
p_pid);
const int __linuxthreads_pthread_sizeof_descr
= sizeof(struct _pthread_descr_struct);
+const int __linuxthreads_initial_report_events;
-/* Forward declarations */
+const char __linuxthreads_version[] = VERSION;
-static void pthread_exit_process(int retcode, void *arg);
-#ifndef __i386__
+/* Forward declarations */
+static void pthread_onexit_process(int retcode, void *arg);
static void pthread_handle_sigcancel(int sig);
static void pthread_handle_sigrestart(int sig);
-#else
-static void pthread_handle_sigcancel(int sig, struct sigcontext ctx);
-static void pthread_handle_sigrestart(int sig, struct sigcontext ctx);
-#endif
static void pthread_handle_sigdebug(int sig);
+int __pthread_timedsuspend_new(pthread_descr self, const struct timespec *abstime);
/* Signal numbers used for the communication.
In these variables we keep track of the used variables. If the
platform does not support any real-time signals we will define the
values to some unreasonable value which will signal failing of all
the functions below. */
-#ifdef __NR_rt_sigaction
+#ifndef __NR_rt_sigaction
+static int current_rtmin = -1;
+static int current_rtmax = -1;
+int __pthread_sig_restart = SIGUSR1;
+int __pthread_sig_cancel = SIGUSR2;
+int __pthread_sig_debug;
+#else
+
+#if __SIGRTMAX - __SIGRTMIN >= 3
+static int current_rtmin = __SIGRTMIN + 3;
+static int current_rtmax = __SIGRTMAX;
int __pthread_sig_restart = __SIGRTMIN;
int __pthread_sig_cancel = __SIGRTMIN + 1;
int __pthread_sig_debug = __SIGRTMIN + 2;
void (*__pthread_restart)(pthread_descr) = __pthread_restart_new;
void (*__pthread_suspend)(pthread_descr) = __pthread_wait_for_restart_signal;
+int (*__pthread_timedsuspend)(pthread_descr, const struct timespec *) = __pthread_timedsuspend_new;
#else
+static int current_rtmin = __SIGRTMIN;
+static int current_rtmax = __SIGRTMAX;
int __pthread_sig_restart = SIGUSR1;
int __pthread_sig_cancel = SIGUSR2;
-int __pthread_sig_debug = 0;
-/* Pointers that select new or old suspend/resume functions
- based on availability of rt signals. */
+int __pthread_sig_debug;
void (*__pthread_restart)(pthread_descr) = __pthread_restart_old;
void (*__pthread_suspend)(pthread_descr) = __pthread_suspend_old;
+int (*__pthread_timedsuspend)(pthread_descr, const struct timespec *) = __pthread_timedsuspend_old;
+
+#endif
+
+/* Return number of available real-time signal with highest priority. */
+int __libc_current_sigrtmin (void)
+{
+ return current_rtmin;
+}
+
+/* Return number of available real-time signal with lowest priority. */
+int __libc_current_sigrtmax (void)
+{
+ return current_rtmax;
+}
+
+/* Allocate real-time signal with highest/lowest available
+ priority. Please note that we don't use a lock since we assume
+ this function to be called at program start. */
+int __libc_allocate_rtsig (int high)
+{
+ if (current_rtmin == -1 || current_rtmin > current_rtmax)
+ /* We don't have anymore signal available. */
+ return -1;
+ return high ? current_rtmin++ : current_rtmax--;
+}
#endif
/* Initialize the pthread library.
@@ -307,39 +346,27 @@ static void pthread_initialize(void)
/* Setup signal handlers for the initial thread.
Since signal handlers are shared between threads, these settings
will be inherited by all other threads. */
-#ifndef __i386__
sa.sa_handler = pthread_handle_sigrestart;
-#else
- sa.sa_handler = (__sighandler_t) pthread_handle_sigrestart;
-#endif
sigemptyset(&sa.sa_mask);
sa.sa_flags = 0;
__libc_sigaction(__pthread_sig_restart, &sa, NULL);
-#ifndef __i386__
sa.sa_handler = pthread_handle_sigcancel;
-#else
- sa.sa_handler = (__sighandler_t) pthread_handle_sigcancel;
-#endif
- sa.sa_flags = 0;
+ // sa.sa_flags = 0;
__libc_sigaction(__pthread_sig_cancel, &sa, NULL);
if (__pthread_sig_debug > 0) {
- sa.sa_handler = pthread_handle_sigdebug;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = 0;
- __libc_sigaction(__pthread_sig_debug, &sa, NULL);
+ sa.sa_handler = pthread_handle_sigdebug;
+ sigemptyset(&sa.sa_mask);
+ // sa.sa_flags = 0;
+ __libc_sigaction(__pthread_sig_debug, &sa, NULL);
}
/* Initially, block __pthread_sig_restart. Will be unblocked on demand. */
sigemptyset(&mask);
sigaddset(&mask, __pthread_sig_restart);
-PDEBUG("block mask = %x\n", mask);
sigprocmask(SIG_BLOCK, &mask, NULL);
/* Register an exit function to kill all other threads. */
/* Do it early so that user-registered atexit functions are called
- before pthread_exit_process. */
- on_exit(pthread_exit_process, NULL);
-#ifdef __NR_rt_sigaction
- __pthread_init_condvar(1);
-#endif
+ before pthread_onexit_process. */
+ on_exit(pthread_onexit_process, NULL);
}
void __pthread_initialize(void)
@@ -351,6 +378,7 @@ int __pthread_initialize_manager(void)
{
int manager_pipe[2];
int pid;
+ int report_events;
struct pthread_request request;
/* If basic initialization not done yet (e.g. we're called from a
@@ -379,7 +407,18 @@ int __pthread_initialize_manager(void)
}
/* Start the thread manager */
pid = 0;
- if (__pthread_initial_thread.p_report_events)
+#ifdef USE_TLS
+ if (__linuxthreads_initial_report_events != 0)
+ THREAD_SETMEM (((pthread_descr) NULL), p_report_events,
+ __linuxthreads_initial_report_events);
+ report_events = THREAD_GETMEM (((pthread_descr) NULL), p_report_events);
+#else
+ if (__linuxthreads_initial_report_events != 0)
+ __pthread_initial_thread.p_report_events
+ = __linuxthreads_initial_report_events;
+ report_events = __pthread_initial_thread.p_report_events;
+#endif
+ if (__builtin_expect (report_events, 0))
{
/* It's a bit more complicated. We have to report the creation of
the manager thread. */
@@ -391,7 +430,7 @@ int __pthread_initialize_manager(void)
!= 0)
{
- __pthread_lock(__pthread_manager_thread.p_lock, NULL);
+ __pthread_lock(__pthread_manager_thread.p_lock, NULL);
pid = clone(__pthread_manager_event,
(void **) __pthread_manager_thread_tos,
@@ -405,7 +444,7 @@ int __pthread_initialize_manager(void)
the new thread do this since we don't know whether it was
already scheduled when we send the event. */
__pthread_manager_thread.p_eventbuf.eventdata =
- &__pthread_manager_thread;
+ &__pthread_manager_thread;
__pthread_manager_thread.p_eventbuf.eventnum = TD_CREATE;
__pthread_last_event = &__pthread_manager_thread;
__pthread_manager_thread.p_tid = 2* PTHREAD_THREADS_MAX + 1;
@@ -434,6 +473,7 @@ int __pthread_initialize_manager(void)
__pthread_manager_reader = manager_pipe[0]; /* reading end */
__pthread_manager_thread.p_tid = 2* PTHREAD_THREADS_MAX + 1;
__pthread_manager_thread.p_pid = pid;
+
/* Make gdb aware of new thread manager */
if (__pthread_threads_debug && __pthread_sig_debug > 0)
{
@@ -445,7 +485,8 @@ int __pthread_initialize_manager(void)
/* Synchronize debugging of the thread manager */
PDEBUG("send REQ_DEBUG to manager thread\n");
request.req_kind = REQ_DEBUG;
- __libc_write(__pthread_manager_request, (char *) &request, sizeof(request));
+ TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
+ (char *) &request, sizeof(request)));
return 0;
}
@@ -467,7 +508,8 @@ int pthread_create(pthread_t *thread, const pthread_attr_t *attr,
sigprocmask(SIG_SETMASK, (const sigset_t *) NULL,
&request.req_args.create.mask);
PDEBUG("write REQ_CREATE to manager thread\n");
- __libc_write(__pthread_manager_request, (char *) &request, sizeof(request));
+ TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
+ (char *) &request, sizeof(request)));
PDEBUG("before suspend(self)\n");
suspend(self);
PDEBUG("after suspend(self)\n");
@@ -562,45 +604,39 @@ int pthread_getschedparam(pthread_t thread, int *policy,
/* Process-wide exit() request */
-static void pthread_exit_process(int retcode, void *arg)
+static void pthread_onexit_process(int retcode, void *arg)
{
- struct pthread_request request;
- pthread_descr self = thread_self();
-
- if (__pthread_manager_request >= 0) {
- request.req_thread = self;
- request.req_kind = REQ_PROCESS_EXIT;
- request.req_args.exit.code = retcode;
- __libc_write(__pthread_manager_request,
- (char *) &request, sizeof(request));
- suspend(self);
- /* Main thread should accumulate times for thread manager and its
- children, so that timings for main thread account for all threads. */
- if (self == __pthread_main_thread)
- waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
- }
+ struct pthread_request request;
+ pthread_descr self = thread_self();
+
+ if (__pthread_manager_request >= 0) {
+ request.req_thread = self;
+ request.req_kind = REQ_PROCESS_EXIT;
+ request.req_args.exit.code = retcode;
+ TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
+ (char *) &request, sizeof(request)));
+ suspend(self);
+ /* Main thread should accumulate times for thread manager and its
+ children, so that timings for main thread account for all threads. */
+ if (self == __pthread_main_thread) {
+ waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
+ /* Since all threads have been asynchronously terminated
+ * (possibly holding locks), free cannot be used any more. */
+ __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
+ }
+ }
}
/* The handler for the RESTART signal just records the signal received
in the thread descriptor, and optionally performs a siglongjmp
(for pthread_cond_timedwait). */
-#ifndef __i386__
static void pthread_handle_sigrestart(int sig)
{
- pthread_descr self = thread_self();
- PDEBUG("got called in non-i386 mode for %u\n", self);
-#else
-static void pthread_handle_sigrestart(int sig, struct sigcontext ctx)
-{
- pthread_descr self;
- asm volatile ("movw %w0,%%gs" : : "r" (ctx.gs));
- self = thread_self();
- PDEBUG("got called in i386-mode for %u\n", self);
-#endif
- THREAD_SETMEM(self, p_signal, sig);
- if (THREAD_GETMEM(self, p_signal_jmp) != NULL)
- siglongjmp(*THREAD_GETMEM(self, p_signal_jmp), 1);
+ pthread_descr self = thread_self();
+ THREAD_SETMEM(self, p_signal, sig);
+ if (THREAD_GETMEM(self, p_signal_jmp) != NULL)
+ siglongjmp(*THREAD_GETMEM(self, p_signal_jmp), 1);
}
/* The handler for the CANCEL signal checks for cancellation
@@ -608,33 +644,48 @@ static void pthread_handle_sigrestart(int sig, struct sigcontext ctx)
For the thread manager thread, redirect the signal to
__pthread_manager_sighandler. */
-#ifndef __i386__
static void pthread_handle_sigcancel(int sig)
{
pthread_descr self = thread_self();
sigjmp_buf * jmpbuf;
-#else
-static void pthread_handle_sigcancel(int sig, struct sigcontext ctx)
-{
- pthread_descr self;
- sigjmp_buf * jmpbuf;
- asm volatile ("movw %w0,%%gs" : : "r" (ctx.gs));
- self = thread_self();
-#endif
+
if (self == &__pthread_manager_thread)
{
+#ifdef THREAD_SELF
+ /* A new thread might get a cancel signal before it is fully
+ initialized, so that the thread register might still point to the
+ manager thread. Double check that this is really the manager
+ thread. */
+ pthread_descr real_self = thread_self_stack();
+ if (real_self == &__pthread_manager_thread)
+ {
+ __pthread_manager_sighandler(sig);
+ return;
+ }
+ /* Oops, thread_self() isn't working yet.. */
+ self = real_self;
+# ifdef INIT_THREAD_SELF
+ INIT_THREAD_SELF(self, self->p_nr);
+# endif
+#else
__pthread_manager_sighandler(sig);
return;
+#endif
}
- if (__pthread_exit_requested) {
+ if (__builtin_expect (__pthread_exit_requested, 0)) {
/* Main thread should accumulate times for thread manager and its
children, so that timings for main thread account for all threads. */
- if (self == __pthread_main_thread)
+ if (self == __pthread_main_thread) {
+#ifdef USE_TLS
+ waitpid(__pthread_manager_thread->p_pid, NULL, __WCLONE);
+#else
waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
+#endif
+ }
_exit(__pthread_exit_code);
}
- if (THREAD_GETMEM(self, p_canceled)
+ if (__builtin_expect (THREAD_GETMEM(self, p_canceled), 0)
&& THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
if (THREAD_GETMEM(self, p_canceltype) == PTHREAD_CANCEL_ASYNCHRONOUS)
pthread_exit(PTHREAD_CANCELED);
@@ -698,7 +749,7 @@ void __pthread_kill_other_threads_np(void)
{
struct sigaction sa;
/* Terminate all other threads and thread manager */
- pthread_exit_process(0, NULL);
+ pthread_onexit_process(0, NULL);
/* Make current thread the main thread in case the calling thread
changes its mind, does not exec(), and creates new threads instead. */
__pthread_reset_main_thread();
@@ -732,65 +783,188 @@ int __pthread_getconcurrency(void)
}
weak_alias (__pthread_getconcurrency, pthread_getconcurrency)
-void __pthread_set_own_extricate_if(pthread_descr self, pthread_extricate_if *peif)
-{
- __pthread_lock(self->p_lock, self);
- THREAD_SETMEM(self, p_extricate, peif);
- __pthread_unlock(self->p_lock);
-}
/* Primitives for controlling thread execution */
void __pthread_wait_for_restart_signal(pthread_descr self)
{
- sigset_t mask;
+ sigset_t mask;
- sigprocmask(SIG_SETMASK, NULL, &mask); /* Get current signal mask */
- sigdelset(&mask, __pthread_sig_restart); /* Unblock the restart signal */
- do {
- self->p_signal = 0;
- PDEBUG("temporary block mask = %x\n", mask);
- sigsuspend(&mask); /* Wait for signal */
- PDEBUG(" *** after sigsuspend *** \n");
- } while (self->p_signal !=__pthread_sig_restart );
-}
+ sigprocmask(SIG_SETMASK, NULL, &mask); /* Get current signal mask */
+ sigdelset(&mask, __pthread_sig_restart); /* Unblock the restart signal */
+ THREAD_SETMEM(self, p_signal, 0);
+ do {
+ sigsuspend(&mask); /* Wait for signal */
+ } while (THREAD_GETMEM(self, p_signal) !=__pthread_sig_restart);
-#ifdef __NR_rt_sigaction
-void __pthread_restart_new(pthread_descr th)
-{
- kill(th->p_pid, __pthread_sig_restart);
+ READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
}
-/* There is no __pthread_suspend_new because it would just
- be a wasteful wrapper for __pthread_wait_for_restart_signal */
-#if 0
-void __pthread_suspend_new(pthread_descr th)
-{
- __pthread_wait_for_restart_signal(th);
-}
-#endif
-
-#else
-/* The _old variants are for 2.0 and early 2.1 kernels which don't have RT signals.
+#ifndef __NR_rt_sigaction
+/* The _old variants are for 2.0 and early 2.1 kernels which don't have RT
+ signals.
On these kernels, we use SIGUSR1 and SIGUSR2 for restart and cancellation.
Since the restart signal does not queue, we use an atomic counter to create
queuing semantics. This is needed to resolve a rare race condition in
pthread_cond_timedwait_relative. */
+
void __pthread_restart_old(pthread_descr th)
{
- if (atomic_increment(&th->p_resume_count) == -1)
- kill(th->p_pid, __pthread_sig_restart);
+ if (atomic_increment(&th->p_resume_count) == -1)
+ kill(th->p_pid, __pthread_sig_restart);
}
void __pthread_suspend_old(pthread_descr self)
{
- if (atomic_decrement(&self->p_resume_count) <= 0)
- __pthread_wait_for_restart_signal(self);
+ if (atomic_decrement(&self->p_resume_count) <= 0)
+ __pthread_wait_for_restart_signal(self);
}
-#endif
-/* There is no __pthread_suspend_new because it would just
- be a wasteful wrapper for __pthread_wait_for_restart_signal */
+int
+__pthread_timedsuspend_old(pthread_descr self, const struct timespec *abstime)
+{
+ sigset_t unblock, initial_mask;
+ int was_signalled = 0;
+ sigjmp_buf jmpbuf;
+
+ if (atomic_decrement(&self->p_resume_count) == 0) {
+ /* Set up a longjmp handler for the restart signal, unblock
+ the signal and sleep. */
+
+ if (sigsetjmp(jmpbuf, 1) == 0) {
+ THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
+ THREAD_SETMEM(self, p_signal, 0);
+ /* Unblock the restart signal */
+ sigemptyset(&unblock);
+ sigaddset(&unblock, __pthread_sig_restart);
+ sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);
+
+ while (1) {
+ struct timeval now;
+ struct timespec reltime;
+
+ /* Compute a time offset relative to now. */
+ __gettimeofday (&now, NULL);
+ reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
+ reltime.tv_sec = abstime->tv_sec - now.tv_sec;
+ if (reltime.tv_nsec < 0) {
+ reltime.tv_nsec += 1000000000;
+ reltime.tv_sec -= 1;
+ }
+
+ /* Sleep for the required duration. If woken by a signal,
+ resume waiting as required by Single Unix Specification. */
+ if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
+ break;
+ }
+
+ /* Block the restart signal again */
+ sigprocmask(SIG_SETMASK, &initial_mask, NULL);
+ was_signalled = 0;
+ } else {
+ was_signalled = 1;
+ }
+ THREAD_SETMEM(self, p_signal_jmp, NULL);
+ }
+
+ /* Now was_signalled is true if we exited the above code
+ due to the delivery of a restart signal. In that case,
+ we know we have been dequeued and resumed and that the
+ resume count is balanced. Otherwise, there are some
+ cases to consider. First, try to bump up the resume count
+ back to zero. If it goes to 1, it means restart() was
+ invoked on this thread. The signal must be consumed
+ and the count bumped down and everything is cool. We
+ can return a 1 to the caller.
+ Otherwise, no restart was delivered yet, so a potential
+ race exists; we return a 0 to the caller which must deal
+ with this race in an appropriate way; for example by
+ atomically removing the thread from consideration for a
+ wakeup---if such a thing fails, it means a restart is
+ being delivered. */
+
+ if (!was_signalled) {
+ if (atomic_increment(&self->p_resume_count) != -1) {
+ __pthread_wait_for_restart_signal(self);
+ atomic_decrement(&self->p_resume_count); /* should be zero now! */
+ /* woke spontaneously and consumed restart signal */
+ return 1;
+ }
+ /* woke spontaneously but did not consume restart---caller must resolve */
+ return 0;
+ }
+ /* woken due to restart signal */
+ return 1;
+}
+#endif /* __NR_rt_sigaction */
+
+
+#ifdef __NR_rt_sigaction
+void __pthread_restart_new(pthread_descr th)
+{
+    /* The barrier is probably not needed, in which case it still documents
+     our assumptions.  The intent is to commit previous writes to shared
+     memory so the woken thread will have a consistent view.  Complementary
+     read barriers are present in the suspend functions. */
+ WRITE_MEMORY_BARRIER();
+ kill(th->p_pid, __pthread_sig_restart);
+}
+
+int __pthread_timedsuspend_new(pthread_descr self, const struct timespec *abstime)
+{
+ sigset_t unblock, initial_mask;
+ int was_signalled = 0;
+ sigjmp_buf jmpbuf;
+
+ if (sigsetjmp(jmpbuf, 1) == 0) {
+ THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
+ THREAD_SETMEM(self, p_signal, 0);
+ /* Unblock the restart signal */
+ sigemptyset(&unblock);
+ sigaddset(&unblock, __pthread_sig_restart);
+ sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);
+
+ while (1) {
+ struct timeval now;
+ struct timespec reltime;
+
+ /* Compute a time offset relative to now. */
+ gettimeofday (&now, NULL);
+ reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
+ reltime.tv_sec = abstime->tv_sec - now.tv_sec;
+ if (reltime.tv_nsec < 0) {
+ reltime.tv_nsec += 1000000000;
+ reltime.tv_sec -= 1;
+ }
+
+ /* Sleep for the required duration. If woken by a signal,
+ resume waiting as required by Single Unix Specification. */
+ if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
+ break;
+ }
+
+ /* Block the restart signal again */
+ sigprocmask(SIG_SETMASK, &initial_mask, NULL);
+ was_signalled = 0;
+ } else {
+ was_signalled = 1;
+ }
+ THREAD_SETMEM(self, p_signal_jmp, NULL);
+
+ /* Now was_signalled is true if we exited the above code
+ due to the delivery of a restart signal. In that case,
+ everything is cool. We have been removed from whatever
+ we were waiting on by the other thread, and consumed its signal.
+
+     Otherwise this thread woke up spontaneously, or due to a signal other
+     than restart.  This is an ambiguous case that must be resolved by
+     the caller; the thread is still eligible for a restart wakeup
+     so there is a race. */
+
+ READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
+ return was_signalled;
+}
+#endif
/* Debugging aid */
@@ -805,7 +979,7 @@ void __pthread_message(char * fmt, ...)
va_start(args, fmt);
vsnprintf(buffer + 8, sizeof(buffer) - 8, fmt, args);
va_end(args);
- __libc_write(2, buffer, strlen(buffer));
+ TEMP_FAILURE_RETRY(__libc_write(2, buffer, strlen(buffer)));
}
#endif
diff --git a/libpthread/linuxthreads/restart.h b/libpthread/linuxthreads/restart.h
index 702d7d15c..f72fb709f 100644
--- a/libpthread/linuxthreads/restart.h
+++ b/libpthread/linuxthreads/restart.h
@@ -13,15 +13,37 @@
/* GNU Library General Public License for more details. */
#include <signal.h>
+#include <sys/syscall.h>
/* Primitives for controlling thread execution */
static inline void restart(pthread_descr th)
{
- __pthread_restart(th); /* see pthread.c */
+ /* See pthread.c */
+#ifdef __NR_rt_sigaction
+ __pthread_restart_new(th);
+#else
+ __pthread_restart(th);
+#endif
}
static inline void suspend(pthread_descr self)
{
- __pthread_suspend(self); /* see pthread.c */
+ /* See pthread.c */
+#ifdef __NR_rt_sigaction
+ __pthread_wait_for_restart_signal(self);
+#else
+ __pthread_suspend(self);
+#endif
+}
+
+static inline int timedsuspend(pthread_descr self,
+ const struct timespec *abstime)
+{
+ /* See pthread.c */
+#ifdef __NR_rt_sigaction
+ return __pthread_timedsuspend_new(self, abstime);
+#else
+ return __pthread_timedsuspend(self, abstime);
+#endif
}
diff --git a/libpthread/linuxthreads/semaphore.c b/libpthread/linuxthreads/semaphore.c
index 0297b3a1e..0038ddd2a 100644
--- a/libpthread/linuxthreads/semaphore.c
+++ b/libpthread/linuxthreads/semaphore.c
@@ -14,6 +14,8 @@
/* Semaphores a la POSIX 1003.1b */
+#include <features.h>
+#define __USE_GNU
#include <errno.h>
#include "pthread.h"
#include "semaphore.h"
@@ -32,7 +34,7 @@ int __new_sem_init(sem_t *sem, int pshared, unsigned int value)
errno = ENOSYS;
return -1;
}
- __pthread_init_lock((struct _pthread_fastlock *) &sem->__sem_lock);
+ __pthread_init_lock(&sem->__sem_lock);
sem->__sem_value = value;
sem->__sem_waiting = NULL;
return 0;
@@ -47,9 +49,9 @@ static int new_sem_extricate_func(void *obj, pthread_descr th)
sem_t *sem = obj;
int did_remove = 0;
- __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+ __pthread_lock(&sem->__sem_lock, self);
did_remove = remove_from_queue(&sem->__sem_waiting, th);
- __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+ __pthread_unlock(&sem->__sem_lock);
return did_remove;
}
@@ -59,35 +61,50 @@ int __new_sem_wait(sem_t * sem)
volatile pthread_descr self = thread_self();
pthread_extricate_if extr;
int already_canceled = 0;
+ int spurious_wakeup_count;
/* Set up extrication interface */
extr.pu_object = sem;
extr.pu_extricate_func = new_sem_extricate_func;
- __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+ __pthread_lock(&sem->__sem_lock, self);
if (sem->__sem_value > 0) {
sem->__sem_value--;
- __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+ __pthread_unlock(&sem->__sem_lock);
return 0;
}
/* Register extrication interface */
- __pthread_set_own_extricate_if(self, &extr);
+ THREAD_SETMEM(self, p_sem_avail, 0);
+ __pthread_set_own_extricate_if(self, &extr);
/* Enqueue only if not already cancelled. */
if (!(THREAD_GETMEM(self, p_canceled)
&& THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
enqueue(&sem->__sem_waiting, self);
else
already_canceled = 1;
- __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+ __pthread_unlock(&sem->__sem_lock);
if (already_canceled) {
- __pthread_set_own_extricate_if(self, 0);
+ __pthread_set_own_extricate_if(self, 0);
pthread_exit(PTHREAD_CANCELED);
- }
+ }
/* Wait for sem_post or cancellation, or fall through if already canceled */
- suspend(self);
- __pthread_set_own_extricate_if(self, 0);
+ spurious_wakeup_count = 0;
+ while (1)
+ {
+ suspend(self);
+ if (THREAD_GETMEM(self, p_sem_avail) == 0
+ && (THREAD_GETMEM(self, p_woken_by_cancel) == 0
+ || THREAD_GETMEM(self, p_cancelstate) != PTHREAD_CANCEL_ENABLE))
+ {
+ /* Count resumes that don't belong to us. */
+ spurious_wakeup_count++;
+ continue;
+ }
+ break;
+ }
+ __pthread_set_own_extricate_if(self, 0);
/* Terminate only if the wakeup came from cancellation. */
/* Otherwise ignore cancellation because we got the semaphore. */
@@ -105,7 +122,7 @@ int __new_sem_trywait(sem_t * sem)
{
int retval;
- __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, NULL);
+ __pthread_lock(&sem->__sem_lock, NULL);
if (sem->__sem_value == 0) {
errno = EAGAIN;
retval = -1;
@@ -113,7 +130,7 @@ int __new_sem_trywait(sem_t * sem)
sem->__sem_value--;
retval = 0;
}
- __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+ __pthread_unlock(&sem->__sem_lock);
return retval;
}
@@ -124,19 +141,21 @@ int __new_sem_post(sem_t * sem)
struct pthread_request request;
if (THREAD_GETMEM(self, p_in_sighandler) == NULL) {
- __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+ __pthread_lock(&sem->__sem_lock, self);
if (sem->__sem_waiting == NULL) {
if (sem->__sem_value >= SEM_VALUE_MAX) {
/* Overflow */
errno = ERANGE;
- __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+ __pthread_unlock(&sem->__sem_lock);
return -1;
}
sem->__sem_value++;
- __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+ __pthread_unlock(&sem->__sem_lock);
} else {
th = dequeue(&sem->__sem_waiting);
- __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+ __pthread_unlock(&sem->__sem_lock);
+ th->p_sem_avail = 1;
+ WRITE_MEMORY_BARRIER();
restart(th);
}
} else {
@@ -150,8 +169,8 @@ int __new_sem_post(sem_t * sem)
}
request.req_kind = REQ_POST;
request.req_args.post = sem;
- __libc_write(__pthread_manager_request,
- (char *) &request, sizeof(request));
+ TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
+ (char *) &request, sizeof(request)));
}
return 0;
}
@@ -189,21 +208,99 @@ int sem_unlink(const char *name)
return -1;
}
-#if defined PIC && DO_VERSIONING
-default_symbol_version (__new_sem_init, sem_init, GLIBC_2.1);
-default_symbol_version (__new_sem_wait, sem_wait, GLIBC_2.1);
-default_symbol_version (__new_sem_trywait, sem_trywait, GLIBC_2.1);
-default_symbol_version (__new_sem_post, sem_post, GLIBC_2.1);
-default_symbol_version (__new_sem_getvalue, sem_getvalue, GLIBC_2.1);
-default_symbol_version (__new_sem_destroy, sem_destroy, GLIBC_2.1);
-#else
-# ifdef weak_alias
+int sem_timedwait(sem_t *sem, const struct timespec *abstime)
+{
+ pthread_descr self = thread_self();
+ pthread_extricate_if extr;
+ int already_canceled = 0;
+ int spurious_wakeup_count;
+
+ __pthread_lock(&sem->__sem_lock, self);
+ if (sem->__sem_value > 0) {
+ --sem->__sem_value;
+ __pthread_unlock(&sem->__sem_lock);
+ return 0;
+ }
+
+ if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) {
+ /* The standard requires that if the function would block and the
+ time value is illegal, the function returns with an error. */
+ __pthread_unlock(&sem->__sem_lock);
+ return EINVAL;
+ }
+
+ /* Set up extrication interface */
+ extr.pu_object = sem;
+ extr.pu_extricate_func = new_sem_extricate_func;
+
+ /* Register extrication interface */
+ THREAD_SETMEM(self, p_sem_avail, 0);
+ __pthread_set_own_extricate_if(self, &extr);
+ /* Enqueue only if not already cancelled. */
+ if (!(THREAD_GETMEM(self, p_canceled)
+ && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
+ enqueue(&sem->__sem_waiting, self);
+ else
+ already_canceled = 1;
+ __pthread_unlock(&sem->__sem_lock);
+
+ if (already_canceled) {
+ __pthread_set_own_extricate_if(self, 0);
+ pthread_exit(PTHREAD_CANCELED);
+ }
+
+ spurious_wakeup_count = 0;
+ while (1)
+ {
+ if (timedsuspend(self, abstime) == 0) {
+ int was_on_queue;
+
+ /* __pthread_lock will queue back any spurious restarts that
+ may happen to it. */
+
+ __pthread_lock(&sem->__sem_lock, self);
+ was_on_queue = remove_from_queue(&sem->__sem_waiting, self);
+ __pthread_unlock(&sem->__sem_lock);
+
+ if (was_on_queue) {
+ __pthread_set_own_extricate_if(self, 0);
+ return ETIMEDOUT;
+ }
+
+ /* Eat the outstanding restart() from the signaller */
+ suspend(self);
+ }
+
+ if (THREAD_GETMEM(self, p_sem_avail) == 0
+ && (THREAD_GETMEM(self, p_woken_by_cancel) == 0
+ || THREAD_GETMEM(self, p_cancelstate) != PTHREAD_CANCEL_ENABLE))
+ {
+ /* Count resumes that don't belong to us. */
+ spurious_wakeup_count++;
+ continue;
+ }
+ break;
+ }
+
+ __pthread_set_own_extricate_if(self, 0);
+
+ /* Terminate only if the wakeup came from cancellation. */
+ /* Otherwise ignore cancellation because we got the semaphore. */
+
+ if (THREAD_GETMEM(self, p_woken_by_cancel)
+ && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
+ THREAD_SETMEM(self, p_woken_by_cancel, 0);
+ pthread_exit(PTHREAD_CANCELED);
+ }
+ /* We got the semaphore */
+ return 0;
+}
+
+
weak_alias (__new_sem_init, sem_init)
weak_alias (__new_sem_wait, sem_wait)
weak_alias (__new_sem_trywait, sem_trywait)
weak_alias (__new_sem_post, sem_post)
weak_alias (__new_sem_getvalue, sem_getvalue)
weak_alias (__new_sem_destroy, sem_destroy)
-# endif
-#endif
-
+
diff --git a/libpthread/linuxthreads/semaphore.h b/libpthread/linuxthreads/semaphore.h
index 84742233b..9c283c864 100644
--- a/libpthread/linuxthreads/semaphore.h
+++ b/libpthread/linuxthreads/semaphore.h
@@ -17,6 +17,10 @@
#include <features.h>
#include <sys/types.h>
+#ifdef __USE_XOPEN2K
+# define __need_timespec
+# include <time.h>
+#endif
#ifndef _PTHREAD_DESCR_DEFINED
/* Thread descriptors. Needed for `sem_t' definition. */
@@ -27,11 +31,7 @@ typedef struct _pthread_descr_struct *_pthread_descr;
/* System specific semaphore definition. */
typedef struct
{
- struct
- {
- long int status;
- int spinlock;
- } __sem_lock;
+ struct _pthread_fastlock __sem_lock;
int __sem_value;
_pthread_descr __sem_waiting;
} sem_t;
@@ -49,31 +49,39 @@ __BEGIN_DECLS
/* Initialize semaphore object SEM to VALUE. If PSHARED then share it
with other processes. */
-extern int sem_init __P ((sem_t *__sem, int __pshared, unsigned int __value));
+extern int sem_init (sem_t *__sem, int __pshared, unsigned int __value) __THROW;
/* Free resources associated with semaphore object SEM. */
-extern int sem_destroy __P ((sem_t *__sem));
+extern int sem_destroy (sem_t *__sem) __THROW;
/* Open a named semaphore NAME with open flaot OFLAG. */
-extern sem_t *sem_open __P ((__const char *__name, int __oflag, ...));
+extern sem_t *sem_open (__const char *__name, int __oflag, ...) __THROW;
/* Close descriptor for named semaphore SEM. */
-extern int sem_close __P ((sem_t *__sem));
+extern int sem_close (sem_t *__sem) __THROW;
/* Remove named semaphore NAME. */
-extern int sem_unlink __P ((__const char *__name));
+extern int sem_unlink (__const char *__name) __THROW;
/* Wait for SEM being posted. */
-extern int sem_wait __P ((sem_t *__sem));
+extern int sem_wait (sem_t *__sem) __THROW;
+
+#ifdef __USE_XOPEN2K
+/* Similar to `sem_wait' but wait only until ABSTIME. */
+extern int sem_timedwait (sem_t *__restrict __sem,
+ __const struct timespec *__restrict __abstime)
+ __THROW;
+#endif
/* Test whether SEM is posted. */
-extern int sem_trywait __P ((sem_t *__sem));
+extern int sem_trywait (sem_t *__sem) __THROW;
/* Post SEM. */
-extern int sem_post __P ((sem_t *__sem));
+extern int sem_post (sem_t *__sem) __THROW;
/* Get current value of SEM and store it in *SVAL. */
-extern int sem_getvalue __P ((sem_t *__sem, int *__sval));
+extern int sem_getvalue (sem_t *__restrict __sem, int *__restrict __sval)
+ __THROW;
__END_DECLS
diff --git a/libpthread/linuxthreads/signals.c b/libpthread/linuxthreads/signals.c
index fb5e6f6bc..3819d4099 100644
--- a/libpthread/linuxthreads/signals.c
+++ b/libpthread/linuxthreads/signals.c
@@ -127,7 +127,7 @@ static void pthread_sighandler_rt(int signo, struct siginfo *si,
/* The wrapper around sigaction. Install our own signal handler
around the signal. */
-int sigaction(int sig, const struct sigaction * act,
+int __sigaction(int sig, const struct sigaction * act,
struct sigaction * oact)
{
struct sigaction newact;
@@ -171,6 +171,7 @@ printf(__FUNCTION__": signahdler installed, __sigaction successful\n");
}
return 0;
}
+strong_alias(__sigaction, sigaction)
/* A signal handler that does nothing */
static void pthread_null_sighandler(int sig) { }
diff --git a/libpthread/linuxthreads/spinlock.c b/libpthread/linuxthreads/spinlock.c
index b1a99d975..5d4bd20e4 100644
--- a/libpthread/linuxthreads/spinlock.c
+++ b/libpthread/linuxthreads/spinlock.c
@@ -14,20 +14,42 @@
/* Internal locks */
+#define __FORCE_GLIBC
+#include <features.h>
#include <errno.h>
#include <sched.h>
#include <time.h>
+#include <stdlib.h>
+#include <limits.h>
#include "pthread.h"
#include "internals.h"
#include "spinlock.h"
#include "restart.h"
-/* The status field of a fastlock has the following meaning:
- 0: fastlock is free
- 1: fastlock is taken, no thread is waiting on it
- ADDR: fastlock is taken, ADDR is address of thread descriptor for
- first waiting thread, other waiting threads are linked via
- their p_nextlock field.
+static void __pthread_acquire(int * spinlock);
+
+static inline void __pthread_release(int * spinlock)
+{
+ WRITE_MEMORY_BARRIER();
+ *spinlock = __LT_SPINLOCK_INIT;
+ __asm __volatile ("" : "=m" (*spinlock) : "0" (*spinlock));
+}
+
+
+/* The status field of a spinlock is a pointer whose least significant
+ bit is a locked flag.
+
+ Thus the field values have the following meanings:
+
+ status == 0: spinlock is free
+ status == 1: spinlock is taken; no thread is waiting on it
+
+ (status & 1) == 1: spinlock is taken and (status & ~1L) is a
+ pointer to the first waiting thread; other
+ waiting threads are linked via the p_nextlock
+ field.
+ (status & 1) == 0: same as above, but spinlock is not taken.
+
The waiting list is not sorted by priority order.
Actually, we always insert at top of list (sole insertion mode
that can be performed without locking).
@@ -36,35 +58,95 @@
This is safe because there are no concurrent __pthread_unlock
operations -- only the thread that locked the mutex can unlock it. */
+
void internal_function __pthread_lock(struct _pthread_fastlock * lock,
pthread_descr self)
{
+#if defined HAS_COMPARE_AND_SWAP
long oldstatus, newstatus;
- int spurious_wakeup_count = 0;
+ int successful_seizure, spurious_wakeup_count;
+ int spin_count;
+#endif
+
+#if defined TEST_FOR_COMPARE_AND_SWAP
+ if (!__pthread_has_cas)
+#endif
+#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
+ {
+ __pthread_acquire(&lock->__spinlock);
+ return;
+ }
+#endif
+
+#if defined HAS_COMPARE_AND_SWAP
+ /* First try it without preparation. Maybe it's a completely
+ uncontested lock. */
+ if (lock->__status == 0 && __compare_and_swap (&lock->__status, 0, 1))
+ return;
+
+ spurious_wakeup_count = 0;
+ spin_count = 0;
+
+ /* On SMP, try spinning to get the lock. */
+#if 0
+ if (__pthread_smp_kernel) {
+ int max_count = lock->__spinlock * 2 + 10;
+
+ if (max_count > MAX_ADAPTIVE_SPIN_COUNT)
+ max_count = MAX_ADAPTIVE_SPIN_COUNT;
+
+ for (spin_count = 0; spin_count < max_count; spin_count++) {
+ if (((oldstatus = lock->__status) & 1) == 0) {
+ if(__compare_and_swap(&lock->__status, oldstatus, oldstatus | 1))
+ {
+ if (spin_count)
+ lock->__spinlock += (spin_count - lock->__spinlock) / 8;
+ READ_MEMORY_BARRIER();
+ return;
+ }
+ }
+#ifdef BUSY_WAIT_NOP
+ BUSY_WAIT_NOP;
+#endif
+ __asm __volatile ("" : "=m" (lock->__status) : "0" (lock->__status));
+ }
+
+ lock->__spinlock += (spin_count - lock->__spinlock) / 8;
+ }
+#endif
+
+again:
+
+ /* No luck, try once more or suspend. */
do {
oldstatus = lock->__status;
- if (oldstatus == 0) {
- newstatus = 1;
+ successful_seizure = 0;
+
+ if ((oldstatus & 1) == 0) {
+ newstatus = oldstatus | 1;
+ successful_seizure = 1;
} else {
if (self == NULL)
self = thread_self();
- newstatus = (long) self;
+ newstatus = (long) self | 1;
}
+
if (self != NULL) {
- ASSERT(self->p_nextlock == NULL);
- THREAD_SETMEM(self, p_nextlock, (pthread_descr) oldstatus);
+ THREAD_SETMEM(self, p_nextlock, (pthread_descr) (oldstatus));
+ /* Make sure the store in p_nextlock completes before performing
+ the compare-and-swap */
+ MEMORY_BARRIER();
}
- } while(! compare_and_swap(&lock->__status, oldstatus, newstatus,
- &lock->__spinlock));
+ } while(! __compare_and_swap(&lock->__status, oldstatus, newstatus));
- /* Suspend with guard against spurious wakeup.
+ /* Suspend with guard against spurious wakeup.
This can happen in pthread_cond_timedwait_relative, when the thread
wakes up due to timeout and is still on the condvar queue, and then
locks the queue to remove itself. At that point it may still be on the
queue, and may be resumed by a condition signal. */
- if (oldstatus != 0) {
+ if (!successful_seizure) {
for (;;) {
suspend(self);
if (self->p_nextlock != NULL) {
@@ -74,61 +156,506 @@ void internal_function __pthread_lock(struct _pthread_fastlock * lock,
}
break;
}
+ goto again;
}
/* Put back any resumes we caught that don't belong to us. */
while (spurious_wakeup_count--)
restart(self);
+
+ READ_MEMORY_BARRIER();
+#endif
}
-void internal_function __pthread_unlock(struct _pthread_fastlock * lock)
+int __pthread_unlock(struct _pthread_fastlock * lock)
{
+#if defined HAS_COMPARE_AND_SWAP
long oldstatus;
pthread_descr thr, * ptr, * maxptr;
int maxprio;
+#endif
+
+#if defined TEST_FOR_COMPARE_AND_SWAP
+ if (!__pthread_has_cas)
+#endif
+#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
+ {
+ __pthread_release(&lock->__spinlock);
+ return 0;
+ }
+#endif
+
+#if defined HAS_COMPARE_AND_SWAP
+ WRITE_MEMORY_BARRIER();
again:
- oldstatus = lock->__status;
- if (oldstatus == 0 || oldstatus == 1) {
- /* No threads are waiting for this lock. Please note that we also
- enter this case if the lock is not taken at all. If this wouldn't
- be done here we would crash further down. */
- if (! compare_and_swap(&lock->__status, oldstatus, 0, &lock->__spinlock))
- goto again;
- return;
+ while ((oldstatus = lock->__status) == 1) {
+ if (__compare_and_swap_with_release_semantics(&lock->__status,
+ oldstatus, 0))
+ return 0;
}
+
/* Find thread in waiting queue with maximal priority */
ptr = (pthread_descr *) &lock->__status;
- thr = (pthread_descr) oldstatus;
+ thr = (pthread_descr) (oldstatus & ~1L);
maxprio = 0;
maxptr = ptr;
- while (thr != (pthread_descr) 1) {
+
+ /* Before we iterate over the wait queue, we need to execute
+ a read barrier, otherwise we may read stale contents of nodes that may
+ just have been inserted by other processors. One read barrier is enough to
+ ensure we have a stable list; we don't need one for each pointer chase
+ through the list, because we are the owner of the lock; other threads
+ can only add nodes at the front; if a front node is consistent,
+ the ones behind it must also be. */
+
+ READ_MEMORY_BARRIER();
+
+ while (thr != 0) {
if (thr->p_priority >= maxprio) {
maxptr = ptr;
maxprio = thr->p_priority;
}
ptr = &(thr->p_nextlock);
- thr = *ptr;
+ thr = (pthread_descr)((long)(thr->p_nextlock) & ~1L);
}
+
/* Remove max prio thread from waiting list. */
if (maxptr == (pthread_descr *) &lock->__status) {
/* If max prio thread is at head, remove it with compare-and-swap
- to guard against concurrent lock operation */
- thr = (pthread_descr) oldstatus;
- if (! compare_and_swap(&lock->__status,
- oldstatus, (long)(thr->p_nextlock),
- &lock->__spinlock))
+ to guard against concurrent lock operation. This removal
+ also has the side effect of marking the lock as released
+ because the new status comes from thr->p_nextlock whose
+ least significant bit is clear. */
+ thr = (pthread_descr) (oldstatus & ~1L);
+ if (! __compare_and_swap_with_release_semantics
+ (&lock->__status, oldstatus, (long)(thr->p_nextlock) & ~1L))
goto again;
} else {
- /* No risk of concurrent access, remove max prio thread normally */
- thr = *maxptr;
+ /* No risk of concurrent access, remove max prio thread normally.
+ But in this case we must also flip the least significant bit
+ of the status to mark the lock as released. */
+ thr = (pthread_descr)((long)*maxptr & ~1L);
*maxptr = thr->p_nextlock;
+
+ /* Ensure deletion from linked list completes before we
+ release the lock. */
+ WRITE_MEMORY_BARRIER();
+
+ do {
+ oldstatus = lock->__status;
+ } while (!__compare_and_swap_with_release_semantics(&lock->__status,
+ oldstatus, oldstatus & ~1L));
}
- /* Wake up the selected waiting thread */
+
+ /* Wake up the selected waiting thread. Woken thread can check
+ its own p_nextlock field for NULL to detect that it has been removed. No
+ barrier is needed here, since restart() and suspend() take
+ care of memory synchronization. */
+
thr->p_nextlock = NULL;
restart(thr);
+
+ return 0;
+#endif
+}
+
+/*
+ * Alternate fastlocks do not queue threads directly. Instead, they queue
+ * these wait queue node structures. When a timed wait wakes up due to
+ * a timeout, it can leave its wait node in the queue (because there
+ * is no safe way to remove from the queue). Some other thread will
+ * deallocate the abandoned node.
+ */
+
+
+struct wait_node {
+ struct wait_node *next; /* Next node in null terminated linked list */
+ pthread_descr thr; /* The thread waiting with this node */
+ int abandoned; /* Atomic flag */
+};
+
+static long wait_node_free_list;
+static int wait_node_free_list_spinlock;
+
+/* Allocate a new node from the head of the free list using an atomic
+ operation, or else using malloc if that list is empty. A fundamental
+ assumption here is that we can safely access wait_node_free_list->next.
+ That's because we never free nodes once we allocate them, so a pointer to a
+ node remains valid indefinitely. */
+
+static struct wait_node *wait_node_alloc(void)
+{
+ struct wait_node *new_node = 0;
+
+ __pthread_acquire(&wait_node_free_list_spinlock);
+ if (wait_node_free_list != 0) {
+ new_node = (struct wait_node *) wait_node_free_list;
+ wait_node_free_list = (long) new_node->next;
+ }
+ WRITE_MEMORY_BARRIER();
+ __pthread_release(&wait_node_free_list_spinlock);
+
+ if (new_node == 0)
+ return malloc(sizeof *wait_node_alloc());
+
+ return new_node;
+}
+
+/* Return a node to the head of the free list using an atomic
+ operation. */
+
+static void wait_node_free(struct wait_node *wn)
+{
+ __pthread_acquire(&wait_node_free_list_spinlock);
+ wn->next = (struct wait_node *) wait_node_free_list;
+ wait_node_free_list = (long) wn;
+ WRITE_MEMORY_BARRIER();
+ __pthread_release(&wait_node_free_list_spinlock);
+ return;
+}
+
+#if defined HAS_COMPARE_AND_SWAP
+
+/* Remove a wait node from the specified queue. It is assumed
+ that the removal takes place concurrently with only atomic insertions at the
+ head of the queue. */
+
+static void wait_node_dequeue(struct wait_node **pp_head,
+ struct wait_node **pp_node,
+ struct wait_node *p_node)
+{
+ /* If the node is being deleted from the head of the
+ list, it must be deleted using atomic compare-and-swap.
+ Otherwise it can be deleted in the straightforward way. */
+
+ if (pp_node == pp_head) {
+ /* We don't need a read barrier between these next two loads,
+ because it is assumed that the caller has already ensured
+ the stability of *p_node with respect to p_node. */
+
+ long oldvalue = (long) p_node;
+ long newvalue = (long) p_node->next;
+
+ if (__compare_and_swap((long *) pp_node, oldvalue, newvalue))
+ return;
+
+ /* Oops! Compare and swap failed, which means the node is
+ no longer first. We delete it using the ordinary method. But we don't
+ know the identity of the node which now holds the pointer to the node
+ being deleted, so we must search from the beginning. */
+
+ for (pp_node = pp_head; p_node != *pp_node; ) {
+ pp_node = &(*pp_node)->next;
+ READ_MEMORY_BARRIER(); /* Stabilize *pp_node for next iteration. */
+ }
+ }
+
+ *pp_node = p_node->next;
+ return;
+}
+
+#endif
+
+void __pthread_alt_lock(struct _pthread_fastlock * lock,
+ pthread_descr self)
+{
+#if defined HAS_COMPARE_AND_SWAP
+ long oldstatus, newstatus;
+#endif
+ struct wait_node wait_node;
+
+#if defined TEST_FOR_COMPARE_AND_SWAP
+ if (!__pthread_has_cas)
+#endif
+#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
+ {
+ int suspend_needed = 0;
+ __pthread_acquire(&lock->__spinlock);
+
+ if (lock->__status == 0)
+ lock->__status = 1;
+ else {
+ if (self == NULL)
+ self = thread_self();
+
+ wait_node.abandoned = 0;
+ wait_node.next = (struct wait_node *) lock->__status;
+ wait_node.thr = self;
+ lock->__status = (long) &wait_node;
+ suspend_needed = 1;
+ }
+
+ __pthread_release(&lock->__spinlock);
+
+ if (suspend_needed)
+ suspend (self);
+ return;
+ }
+#endif
+
+#if defined HAS_COMPARE_AND_SWAP
+ do {
+ oldstatus = lock->__status;
+ if (oldstatus == 0) {
+ newstatus = 1;
+ } else {
+ if (self == NULL)
+ self = thread_self();
+ wait_node.thr = self;
+ newstatus = (long) &wait_node;
+ }
+ wait_node.abandoned = 0;
+ wait_node.next = (struct wait_node *) oldstatus;
+ /* Make sure the store in wait_node.next completes before performing
+ the compare-and-swap */
+ MEMORY_BARRIER();
+ } while(! __compare_and_swap(&lock->__status, oldstatus, newstatus));
+
+ /* Suspend. Note that unlike in __pthread_lock, we don't worry
+ here about spurious wakeup. That's because this lock is not
+ used in situations where that can happen; the restart can
+ only come from the previous lock owner. */
+
+ if (oldstatus != 0)
+ suspend(self);
+
+ READ_MEMORY_BARRIER();
+#endif
+}
+
+/* Timed-out lock operation; returns 0 to indicate timeout. */
+
+int __pthread_alt_timedlock(struct _pthread_fastlock * lock,
+ pthread_descr self, const struct timespec *abstime)
+{
+ long oldstatus = 0;
+#if defined HAS_COMPARE_AND_SWAP
+ long newstatus;
+#endif
+ struct wait_node *p_wait_node = wait_node_alloc();
+
+ /* Out of memory, just give up and do ordinary lock. */
+ if (p_wait_node == 0) {
+ __pthread_alt_lock(lock, self);
+ return 1;
+ }
+
+#if defined TEST_FOR_COMPARE_AND_SWAP
+ if (!__pthread_has_cas)
+#endif
+#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
+ {
+ __pthread_acquire(&lock->__spinlock);
+
+ if (lock->__status == 0)
+ lock->__status = 1;
+ else {
+ if (self == NULL)
+ self = thread_self();
+
+ p_wait_node->abandoned = 0;
+ p_wait_node->next = (struct wait_node *) lock->__status;
+ p_wait_node->thr = self;
+ lock->__status = (long) p_wait_node;
+ oldstatus = 1; /* force suspend */
+ }
+
+ __pthread_release(&lock->__spinlock);
+ goto suspend;
+ }
+#endif
+
+#if defined HAS_COMPARE_AND_SWAP
+ do {
+ oldstatus = lock->__status;
+ if (oldstatus == 0) {
+ newstatus = 1;
+ } else {
+ if (self == NULL)
+ self = thread_self();
+ p_wait_node->thr = self;
+ newstatus = (long) p_wait_node;
+ }
+ p_wait_node->abandoned = 0;
+ p_wait_node->next = (struct wait_node *) oldstatus;
+ /* Make sure the store in wait_node.next completes before performing
+ the compare-and-swap */
+ MEMORY_BARRIER();
+ } while(! __compare_and_swap(&lock->__status, oldstatus, newstatus));
+#endif
+
+#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
+ suspend:
+#endif
+
+ /* If we did not get the lock, do a timed suspend. If we wake up due
+ to a timeout, then there is a race; the old lock owner may try
+ to remove us from the queue. This race is resolved by us and the owner
+ doing an atomic testandset() to change the state of the wait node from 0
+ to 1. If we succeed, then it's a timeout and we abandon the node in the
+ queue. If we fail, it means the owner gave us the lock. */
+
+ if (oldstatus != 0) {
+ if (timedsuspend(self, abstime) == 0) {
+ if (!testandset(&p_wait_node->abandoned))
+ return 0; /* Timeout! */
+
+ /* Eat outstanding resume from owner, otherwise wait_node_free() below
+ will race with owner's wait_node_dequeue(). */
+ suspend(self);
+ }
+ }
+
+ wait_node_free(p_wait_node);
+
+ READ_MEMORY_BARRIER();
+
+ return 1; /* Got the lock! */
+}
+
+void __pthread_alt_unlock(struct _pthread_fastlock *lock)
+{
+ struct wait_node *p_node, **pp_node, *p_max_prio, **pp_max_prio;
+ struct wait_node ** const pp_head = (struct wait_node **) &lock->__status;
+ int maxprio;
+
+ WRITE_MEMORY_BARRIER();
+
+#if defined TEST_FOR_COMPARE_AND_SWAP
+ if (!__pthread_has_cas)
+#endif
+#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
+ {
+ __pthread_acquire(&lock->__spinlock);
+ }
+#endif
+
+ while (1) {
+
+ /* If no threads are waiting for this lock, try to just
+ atomically release it. */
+#if defined TEST_FOR_COMPARE_AND_SWAP
+ if (!__pthread_has_cas)
+#endif
+#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
+ {
+ if (lock->__status == 0 || lock->__status == 1) {
+ lock->__status = 0;
+ break;
+ }
+ }
+#endif
+
+#if defined TEST_FOR_COMPARE_AND_SWAP
+ else
+#endif
+
+#if defined HAS_COMPARE_AND_SWAP
+ {
+ long oldstatus = lock->__status;
+ if (oldstatus == 0 || oldstatus == 1) {
+ if (__compare_and_swap_with_release_semantics (&lock->__status, oldstatus, 0))
+ break;
+ else
+ continue;
+ }
+ }
+#endif
+
+ /* Process the entire queue of wait nodes. Remove all abandoned
+ wait nodes and put them into the global free queue, and
+ remember the one unabandoned node which refers to the thread
+ having the highest priority. */
+
+ pp_max_prio = pp_node = pp_head;
+ p_max_prio = p_node = *pp_head;
+ maxprio = INT_MIN;
+
+ READ_MEMORY_BARRIER(); /* Prevent access to stale data through p_node */
+
+ while (p_node != (struct wait_node *) 1) {
+ int prio;
+
+ if (p_node->abandoned) {
+ /* Remove abandoned node. */
+#if defined TEST_FOR_COMPARE_AND_SWAP
+ if (!__pthread_has_cas)
+#endif
+#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
+ *pp_node = p_node->next;
+#endif
+#if defined TEST_FOR_COMPARE_AND_SWAP
+ else
+#endif
+#if defined HAS_COMPARE_AND_SWAP
+ wait_node_dequeue(pp_head, pp_node, p_node);
+#endif
+ wait_node_free(p_node);
+ /* Note that the next assignment may take us to the beginning
+ of the queue, to newly inserted nodes, if pp_node == pp_head.
+ In that case we need a memory barrier to stabilize the first of
+ these new nodes. */
+ p_node = *pp_node;
+ if (pp_node == pp_head)
+ READ_MEMORY_BARRIER(); /* No stale reads through p_node */
+ continue;
+ } else if ((prio = p_node->thr->p_priority) >= maxprio) {
+ /* Otherwise remember it if its thread has a higher or equal priority
+ compared to that of any node seen thus far. */
+ maxprio = prio;
+ pp_max_prio = pp_node;
+ p_max_prio = p_node;
+ }
+
+ /* This cannot jump backward in the list, so no further read
+ barrier is needed. */
+ pp_node = &p_node->next;
+ p_node = *pp_node;
+ }
+
+ /* If all threads abandoned, go back to top */
+ if (maxprio == INT_MIN)
+ continue;
+
+ ASSERT (p_max_prio != (struct wait_node *) 1);
+
+ /* Now we want to remove the max priority thread's wait node from
+ the list. Before we can do this, we must atomically try to change the
+ node's abandon state from zero to nonzero. If we succeed, that means we
+ have the node that we will wake up. If we failed, then it means the
+ thread timed out and abandoned the node in which case we repeat the
+ whole unlock operation. */
+
+ if (!testandset(&p_max_prio->abandoned)) {
+#if defined TEST_FOR_COMPARE_AND_SWAP
+ if (!__pthread_has_cas)
+#endif
+#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
+ *pp_max_prio = p_max_prio->next;
+#endif
+#if defined TEST_FOR_COMPARE_AND_SWAP
+ else
+#endif
+#if defined HAS_COMPARE_AND_SWAP
+ wait_node_dequeue(pp_head, pp_max_prio, p_max_prio);
+#endif
+ restart(p_max_prio->thr);
+ break;
+ }
+ }
+
+#if defined TEST_FOR_COMPARE_AND_SWAP
+ if (!__pthread_has_cas)
+#endif
+#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
+ {
+ __pthread_release(&lock->__spinlock);
+ }
+#endif
}
+
/* Compare-and-swap emulation with a spinlock */
#ifdef TEST_FOR_COMPARE_AND_SWAP
@@ -137,24 +664,25 @@ int __pthread_has_cas = 0;
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
-static void __pthread_acquire(int * spinlock);
-
int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
int * spinlock)
{
int res;
- if (testandset(spinlock)) __pthread_acquire(spinlock);
+
+ __pthread_acquire(spinlock);
+
if (*ptr == oldval) {
*ptr = newval; res = 1;
} else {
res = 0;
}
- *spinlock = 0;
+
+ __pthread_release(spinlock);
+
return res;
}
-/* This function is called if the inlined test-and-set
- in __pthread_compare_and_swap() failed */
+#endif
/* The retry strategy is as follows:
- We test and set the spinlock MAX_SPIN_COUNT times, calling
@@ -179,6 +707,8 @@ static void __pthread_acquire(int * spinlock)
int cnt = 0;
struct timespec tm;
+ READ_MEMORY_BARRIER();
+
while (testandset(spinlock)) {
if (cnt < MAX_SPIN_COUNT) {
sched_yield();
@@ -191,5 +721,3 @@ static void __pthread_acquire(int * spinlock)
}
}
}
-
-#endif
diff --git a/libpthread/linuxthreads/spinlock.h b/libpthread/linuxthreads/spinlock.h
index aae18a27b..0ec40c57c 100644
--- a/libpthread/linuxthreads/spinlock.h
+++ b/libpthread/linuxthreads/spinlock.h
@@ -12,6 +12,27 @@
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU Library General Public License for more details. */
+#include <bits/initspin.h>
+
+
+/* There are 2 compare and swap synchronization primitives with
+ different semantics:
+
+ 1. compare_and_swap, which has acquire semantics (i.e. it
+ completes before subsequent writes.)
+ 2. compare_and_swap_with_release_semantics, which has release
+ semantics (it completes after previous writes.)
+
+ For those platforms on which they are the same, HAS_COMPARE_AND_SWAP
+ should be defined. For those platforms on which they are different,
+ HAS_COMPARE_AND_SWAP_WITH_RELEASE_SEMANTICS has to be defined. */
+
+#ifndef HAS_COMPARE_AND_SWAP
+#ifdef HAS_COMPARE_AND_SWAP_WITH_RELEASE_SEMANTICS
+#define HAS_COMPARE_AND_SWAP
+#endif
+#endif
+
#if defined(TEST_FOR_COMPARE_AND_SWAP)
extern int __pthread_has_cas;
@@ -29,6 +50,22 @@ static inline int compare_and_swap(long * ptr, long oldval, long newval,
#elif defined(HAS_COMPARE_AND_SWAP)
+#ifdef IMPLEMENT_TAS_WITH_CAS
+#define testandset(p) !__compare_and_swap((long int *) p, 0, 1)
+#endif
+
+#ifdef HAS_COMPARE_AND_SWAP_WITH_RELEASE_SEMANTICS
+
+static inline int
+compare_and_swap_with_release_semantics (long * ptr, long oldval,
+ long newval, int * spinlock)
+{
+ return __compare_and_swap_with_release_semantics (ptr, oldval,
+ newval);
+}
+
+#endif
+
static inline int compare_and_swap(long * ptr, long oldval, long newval,
int * spinlock)
{
@@ -48,30 +85,90 @@ static inline int compare_and_swap(long * ptr, long oldval, long newval,
#endif
+#ifndef HAS_COMPARE_AND_SWAP_WITH_RELEASE_SEMANTICS
+#define compare_and_swap_with_release_semantics compare_and_swap
+#define __compare_and_swap_with_release_semantics __compare_and_swap
+#endif
+
/* Internal locks */
extern void internal_function __pthread_lock(struct _pthread_fastlock * lock,
pthread_descr self);
-extern void internal_function __pthread_unlock(struct _pthread_fastlock *lock);
+extern int __pthread_unlock(struct _pthread_fastlock *lock);
static inline void __pthread_init_lock(struct _pthread_fastlock * lock)
{
lock->__status = 0;
- lock->__spinlock = 0;
+ lock->__spinlock = __LT_SPINLOCK_INIT;
}
static inline int __pthread_trylock (struct _pthread_fastlock * lock)
{
- long oldstatus;
+#if defined TEST_FOR_COMPARE_AND_SWAP
+ if (!__pthread_has_cas)
+#endif
+#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
+ {
+ return (testandset(&lock->__spinlock) ? EBUSY : 0);
+ }
+#endif
+#if defined HAS_COMPARE_AND_SWAP
do {
- oldstatus = lock->__status;
- if (oldstatus != 0) return EBUSY;
- } while(! compare_and_swap(&lock->__status, 0, 1, &lock->__spinlock));
+ if (lock->__status != 0) return EBUSY;
+ } while(! __compare_and_swap(&lock->__status, 0, 1));
return 0;
+#endif
+}
+
+/* Variation of internal lock used for pthread_mutex_t, supporting
+ timed-out waits. Warning: do not mix these operations with the above ones
+ over the same lock object! */
+
+extern void __pthread_alt_lock(struct _pthread_fastlock * lock,
+ pthread_descr self);
+
+extern int __pthread_alt_timedlock(struct _pthread_fastlock * lock,
+ pthread_descr self, const struct timespec *abstime);
+
+extern void __pthread_alt_unlock(struct _pthread_fastlock *lock);
+
+static inline void __pthread_alt_init_lock(struct _pthread_fastlock * lock)
+{
+ lock->__status = 0;
+ lock->__spinlock = __LT_SPINLOCK_INIT;
}
-#define LOCK_INITIALIZER {0, 0}
+static inline int __pthread_alt_trylock (struct _pthread_fastlock * lock)
+{
+#if defined TEST_FOR_COMPARE_AND_SWAP
+ if (!__pthread_has_cas)
+#endif
+#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
+ {
+ int res = EBUSY;
+
+ if (testandset(&lock->__spinlock) == 0)
+ {
+ if (lock->__status == 0)
+ {
+ lock->__status = 1;
+ WRITE_MEMORY_BARRIER();
+ res = 0;
+ }
+ lock->__spinlock = __LT_SPINLOCK_INIT;
+ }
+ return res;
+ }
+#endif
+
+#if defined HAS_COMPARE_AND_SWAP
+ do {
+ if (lock->__status != 0) return EBUSY;
+ } while(! compare_and_swap(&lock->__status, 0, 1, &lock->__spinlock));
+ return 0;
+#endif
+}
/* Operations on pthread_atomic, which is defined in internals.h */
@@ -98,5 +195,24 @@ static inline long atomic_decrement(struct pthread_atomic *pa)
return oldval;
}
-#define ATOMIC_INITIALIZER { 0, 0 }
+static inline void
+__pthread_set_own_extricate_if (pthread_descr self, pthread_extricate_if *peif)
+{
+ /* Only store a non-null peif if the thread has cancellation enabled.
+ Otherwise pthread_cancel will unconditionally call the extricate handler,
+ and restart the thread giving rise to forbidden spurious wakeups. */
+ if (peif == NULL
+ || THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE)
+ {
+ /* If we are removing the extricate interface, we need to synchronize
+ against pthread_cancel so that it does not continue with a pointer
+ to a deallocated pthread_extricate_if struct! The thread lock
+ is (ab)used for this synchronization purpose. */
+ if (peif == NULL)
+ __pthread_lock (THREAD_GETMEM(self, p_lock), self);
+ THREAD_SETMEM(self, p_extricate, peif);
+ if (peif == NULL)
+ __pthread_unlock (THREAD_GETMEM(self, p_lock));
+ }
+}
diff --git a/libpthread/linuxthreads/sysdeps/alpha/pt-machine.h b/libpthread/linuxthreads/sysdeps/alpha/pt-machine.h
index e59c6906c..fa0374bc5 100644
--- a/libpthread/linuxthreads/sysdeps/alpha/pt-machine.h
+++ b/libpthread/linuxthreads/sysdeps/alpha/pt-machine.h
@@ -1,30 +1,39 @@
/* Machine-dependent pthreads configuration and inline functions.
Alpha version.
- Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
+ Copyright (C) 1996, 1997, 1998, 2000, 2002 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Richard Henderson <rth@tamu.edu>.
The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public License as
- published by the Free Software Foundation; either version 2 of the
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Library General Public License for more details.
+ Lesser General Public License for more details.
- You should have received a copy of the GNU Library General Public
+ You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
+#ifndef _PT_MACHINE_H
+#define _PT_MACHINE_H 1
+
#ifndef PT_EI
# define PT_EI extern inline
#endif
-#include <asm/pal.h>
+#ifdef __linux__
+# include <asm/pal.h>
+#else
+# include <machine/pal.h>
+#endif
+extern long int testandset (int *spinlock);
+extern int __compare_and_swap (long int *p, long int oldval, long int newval);
/* Get some notion of the current stack. Need not be exactly the top
of the stack, just something somewhere in the current frame. */
@@ -32,6 +41,12 @@
register char *stack_pointer __asm__("$30");
+/* Memory barrier; default is to do nothing */
+#define MEMORY_BARRIER() __asm__ __volatile__("mb" : : : "memory")
+/* Write barrier. */
+#define WRITE_MEMORY_BARRIER() __asm__ __volatile__("wmb" : : : "memory")
+
+
/* Spinlock implementation; required. */
PT_EI long int
testandset (int *spinlock)
@@ -55,11 +70,6 @@ testandset (int *spinlock)
return ret;
}
-/* Spinlock release; default is just set to zero. */
-#define RELEASE(spinlock) \
- __asm__ __volatile__("mb" : : : "memory"); \
- *spinlock = 0
-
/* Begin allocating thread stacks at this address. Default is to allocate
them just below the initial program stack. */
@@ -70,7 +80,7 @@ testandset (int *spinlock)
#define THREAD_SELF \
({ \
register pthread_descr __self __asm__("$0"); \
- __asm__ ("call_pal %1" : "=r"(__self) : "i"(PAL_rduniq) : "$0"); \
+ __asm__ ("call_pal %1" : "=r"(__self) : "i"(PAL_rduniq)); \
__self; \
})
@@ -102,7 +112,16 @@ __compare_and_swap (long int *p, long int oldval, long int newval)
"2:\tmb\n"
"/* End compare & swap */"
: "=&r"(ret), "=m"(*p)
- : "r"(oldval), "r"(newval), "m"(*p));
+ : "r"(oldval), "r"(newval), "m"(*p)
+ : "memory");
return ret;
}
+
+/* We want the OS to assign stack addresses. */
+#define FLOATING_STACKS 1
+
+/* Maximum size of the stack if the rlimit is unlimited. */
+#define ARCH_STACK_MAX_SIZE 32*1024*1024
+
+#endif /* pt-machine.h */
diff --git a/libpthread/linuxthreads/sysdeps/alpha/stackinfo.h b/libpthread/linuxthreads/sysdeps/alpha/stackinfo.h
new file mode 100644
index 000000000..0a281bd43
--- /dev/null
+++ b/libpthread/linuxthreads/sysdeps/alpha/stackinfo.h
@@ -0,0 +1,28 @@
+/* Copyright (C) 2001 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+/* This file contains a bit of information about the stack allocation
+ of the processor. */
+
+#ifndef _STACKINFO_H
+#define _STACKINFO_H 1
+
+/* On Alpha the stack grows down. */
+#define _STACK_GROWS_DOWN 1
+
+#endif /* stackinfo.h */
diff --git a/libpthread/linuxthreads/sysdeps/arm/pt-machine.h b/libpthread/linuxthreads/sysdeps/arm/pt-machine.h
index d4dc4c4ed..71001ebc2 100644
--- a/libpthread/linuxthreads/sysdeps/arm/pt-machine.h
+++ b/libpthread/linuxthreads/sysdeps/arm/pt-machine.h
@@ -1,35 +1,40 @@
/* Machine-dependent pthreads configuration and inline functions.
ARM version.
- Copyright (C) 1997, 1998 Free Software Foundation, Inc.
+ Copyright (C) 1997, 1998, 2000, 2002 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Philip Blundell <philb@gnu.org>.
The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public License as
- published by the Free Software Foundation; either version 2 of the
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Library General Public License for more details.
+ Lesser General Public License for more details.
- You should have received a copy of the GNU Library General Public
+ You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
+#ifndef _PT_MACHINE_H
+#define _PT_MACHINE_H 1
+
#ifndef PT_EI
# define PT_EI extern inline
#endif
+extern long int testandset (int *spinlock);
+extern int __compare_and_swap (long int *p, long int oldval, long int newval);
/* This will not work on ARM1 or ARM2 because SWP is lacking on those
machines. Unfortunately we have no way to detect this at compile
time; let's hope nobody tries to use one. */
/* Spinlock implementation; required. */
-PT_EI int
+PT_EI long int
testandset (int *spinlock)
{
register unsigned int ret;
@@ -46,3 +51,5 @@ testandset (int *spinlock)
of the stack, just something somewhere in the current frame. */
#define CURRENT_STACK_FRAME stack_pointer
register char * stack_pointer __asm__ ("sp");
+
+#endif /* pt-machine.h */
diff --git a/libpthread/linuxthreads/sysdeps/arm/sigcontextinfo.h b/libpthread/linuxthreads/sysdeps/arm/sigcontextinfo.h
index aebc3cfd5..0e6d295c3 100644
--- a/libpthread/linuxthreads/sysdeps/arm/sigcontextinfo.h
+++ b/libpthread/linuxthreads/sysdeps/arm/sigcontextinfo.h
@@ -17,7 +17,80 @@
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
-#include <bits/armsigctx.h>
+
+/* Definition of `struct sigcontext' for Linux/ARM
+ Copyright (C) 1996, 1997, 1998, 1999, 2000 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+/* The format of struct sigcontext changed between 2.0 and 2.1 kernels.
+ Fortunately 2.0 puts a magic number in the first word and this is not
+ a legal value for `trap_no', so we can tell them apart. */
+
+/* Early 2.2 and 2.3 kernels do not have the `fault_address' member in
+ the sigcontext structure. Unfortunately there is no reliable way
+ to test for its presence and this word will contain garbage for too-old
+ kernels. Versions 2.2.14 and 2.3.35 (plus later versions) are known to
+ include this element. */
+
+#ifndef __ARMSIGCTX_H
+#define __ARMSIGCTX_H 1
+
+#include <asm/ptrace.h>
+
+union k_sigcontext
+ {
+ struct
+ {
+ unsigned long int trap_no;
+ unsigned long int error_code;
+ unsigned long int oldmask;
+ unsigned long int arm_r0;
+ unsigned long int arm_r1;
+ unsigned long int arm_r2;
+ unsigned long int arm_r3;
+ unsigned long int arm_r4;
+ unsigned long int arm_r5;
+ unsigned long int arm_r6;
+ unsigned long int arm_r7;
+ unsigned long int arm_r8;
+ unsigned long int arm_r9;
+ unsigned long int arm_r10;
+ unsigned long int arm_fp;
+ unsigned long int arm_ip;
+ unsigned long int arm_sp;
+ unsigned long int arm_lr;
+ unsigned long int arm_pc;
+ unsigned long int arm_cpsr;
+ unsigned long fault_address;
+ } v21;
+ struct
+ {
+ unsigned long int magic;
+ struct pt_regs reg;
+ unsigned long int trap_no;
+ unsigned long int error_code;
+ unsigned long int oldmask;
+ } v20;
+};
+
+#define SIGCONTEXT_2_0_MAGIC 0x4B534154
+
+#endif /* bits/armsigctx.h */
#define SIGCONTEXT int _a2, int _a3, int _a4, union k_sigcontext
#define SIGCONTEXT_EXTRA_ARGS _a2, _a3, _a4,
diff --git a/libpthread/linuxthreads/sysdeps/arm/stackinfo.h b/libpthread/linuxthreads/sysdeps/arm/stackinfo.h
new file mode 100644
index 000000000..2410ba9bd
--- /dev/null
+++ b/libpthread/linuxthreads/sysdeps/arm/stackinfo.h
@@ -0,0 +1,28 @@
+/* Copyright (C) 2001 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+/* This file contains a bit of information about the stack allocation
+ of the processor. */
+
+#ifndef _STACKINFO_H
+#define _STACKINFO_H 1
+
+/* On Arm the stack grows down. */
+#define _STACK_GROWS_DOWN 1
+
+#endif /* stackinfo.h */
diff --git a/libpthread/linuxthreads/sysdeps/cris/pt-machine.h b/libpthread/linuxthreads/sysdeps/cris/pt-machine.h
index e8073559a..eaead3058 100644
--- a/libpthread/linuxthreads/sysdeps/cris/pt-machine.h
+++ b/libpthread/linuxthreads/sysdeps/cris/pt-machine.h
@@ -1,27 +1,33 @@
/* Machine-dependent pthreads configuration and inline functions.
CRIS version.
- Copyright (C) 2001 Free Software Foundation, Inc.
+ Copyright (C) 2001, 2002 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public License as
- published by the Free Software Foundation; either version 2 of the
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Library General Public License for more details.
+ Lesser General Public License for more details.
- You should have received a copy of the GNU Library General Public
+ You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
+#ifndef _PT_MACHINE_H
+#define _PT_MACHINE_H 1
+
#ifndef PT_EI
# define PT_EI extern inline
#endif
+extern long int testandset (int *spinlock);
+extern int __compare_and_swap (long int *p, long int oldval, long int newval);
+
PT_EI long int
testandset (int *spinlock)
{
@@ -37,7 +43,7 @@ testandset (int *spinlock)
"bwf 0b\n\t"
"clearf"
: "=&r" (ret), "=m" (*spinlock)
- : "r" (spinlock), "r" ((int) 1), "m" (*spinlock)
+ : "r" (spinlock), "r" ((int) 1)
: "memory");
return ret;
}
@@ -48,3 +54,5 @@ testandset (int *spinlock)
I don't trust register variables, so let's do this the safe way. */
#define CURRENT_STACK_FRAME \
({ char *sp; __asm__ ("move.d $sp,%0" : "=rm" (sp)); sp; })
+
+#endif /* pt-machine.h */
diff --git a/libpthread/linuxthreads/sysdeps/cris/stackinfo.h b/libpthread/linuxthreads/sysdeps/cris/stackinfo.h
new file mode 100644
index 000000000..43c944834
--- /dev/null
+++ b/libpthread/linuxthreads/sysdeps/cris/stackinfo.h
@@ -0,0 +1,28 @@
+/* Copyright (C) 2002 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+/* This file contains a bit of information about the stack allocation
+ of the processor. */
+
+#ifndef _STACKINFO_H
+#define _STACKINFO_H 1
+
+/* On cris the stack grows down. */
+#define _STACK_GROWS_DOWN 1
+
+#endif /* stackinfo.h */
diff --git a/libpthread/linuxthreads/sysdeps/i386/i686/pt-machine.h b/libpthread/linuxthreads/sysdeps/i386/i686/pt-machine.h
index 8d9ea709b..3596224c1 100644
--- a/libpthread/linuxthreads/sysdeps/i386/i686/pt-machine.h
+++ b/libpthread/linuxthreads/sysdeps/i386/i686/pt-machine.h
@@ -1,45 +1,51 @@
/* Machine-dependent pthreads configuration and inline functions.
i686 version.
- Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
+ Copyright (C) 1996-2001, 2002 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Richard Henderson <rth@tamu.edu>.
The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public License as
- published by the Free Software Foundation; either version 2 of the
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Library General Public License for more details.
+ Lesser General Public License for more details.
- You should have received a copy of the GNU Library General Public
+ You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
+#ifndef _PT_MACHINE_H
+#define _PT_MACHINE_H 1
+
#ifndef PT_EI
# define PT_EI extern inline
#endif
+#include "kernel-features.h"
+#ifndef ASSEMBLER
+extern long int testandset (int *spinlock);
+extern int __compare_and_swap (long int *p, long int oldval, long int newval);
/* Get some notion of the current stack. Need not be exactly the top
of the stack, just something somewhere in the current frame. */
-#define CURRENT_STACK_FRAME stack_pointer
-register char * stack_pointer __asm__ ("%esp");
+#define CURRENT_STACK_FRAME __builtin_frame_address (0)
/* Spinlock implementation; required. */
-PT_EI int
+PT_EI long int
testandset (int *spinlock)
{
- int ret;
+ long int ret;
__asm__ __volatile__ (
"xchgl %0, %1"
- : "=r"(ret), "=m"(*spinlock)
- : "0"(1), "m"(*spinlock)
+ : "=r" (ret), "=m" (*spinlock)
+ : "0" (1), "m" (*spinlock)
: "memory");
return ret;
@@ -61,7 +67,13 @@ __compare_and_swap (long int *p, long int oldval, long int newval)
: "memory");
return ret;
}
+#endif
+
+#if __ASSUME_LDT_WORKS > 0
+#include "../useldt.h"
+#endif
+/* The P4 and above really want some help to prevent overheating. */
+#define BUSY_WAIT_NOP __asm__ ("rep; nop")
-/* Use the LDT implementation only if the kernel is fixed. */
-//#include "../useldt.h"
+#endif /* pt-machine.h */
diff --git a/libpthread/linuxthreads/sysdeps/i386/pt-machine.h b/libpthread/linuxthreads/sysdeps/i386/pt-machine.h
index f542bb2d2..79c69b549 100644
--- a/libpthread/linuxthreads/sysdeps/i386/pt-machine.h
+++ b/libpthread/linuxthreads/sysdeps/i386/pt-machine.h
@@ -1,39 +1,45 @@
/* Machine-dependent pthreads configuration and inline functions.
i386 version.
- Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
+ Copyright (C) 1996-2001, 2002 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Richard Henderson <rth@tamu.edu>.
The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public License as
- published by the Free Software Foundation; either version 2 of the
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Library General Public License for more details.
+ Lesser General Public License for more details.
- You should have received a copy of the GNU Library General Public
+ You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
+#ifndef _PT_MACHINE_H
+#define _PT_MACHINE_H 1
+
+#ifndef __ASSEMBLER__
#ifndef PT_EI
# define PT_EI extern inline
#endif
+extern long int testandset (int *spinlock);
+extern int __compare_and_swap (long int *p, long int oldval, long int newval);
+
/* Get some notion of the current stack. Need not be exactly the top
of the stack, just something somewhere in the current frame. */
-#define CURRENT_STACK_FRAME stack_pointer
-register char * stack_pointer __asm__ ("%esp");
+#define CURRENT_STACK_FRAME __builtin_frame_address (0)
/* Spinlock implementation; required. */
-PT_EI int
+PT_EI long int
testandset (int *spinlock)
{
- int ret;
+ long int ret;
__asm__ __volatile__(
"xchgl %0, %1"
@@ -97,3 +103,6 @@ compare_and_swap_is_available (void)
Otherwise, it's a 486 or above and it has cmpxchg. */
return changed != 0;
}
+#endif /* __ASSEMBLER__ */
+
+#endif /* pt-machine.h */
diff --git a/libpthread/linuxthreads/sysdeps/i386/sigcontextinfo.h b/libpthread/linuxthreads/sysdeps/i386/sigcontextinfo.h
index 42c18b22b..6530ba6f3 100644
--- a/libpthread/linuxthreads/sysdeps/i386/sigcontextinfo.h
+++ b/libpthread/linuxthreads/sysdeps/i386/sigcontextinfo.h
@@ -1,24 +1,51 @@
-/* Copyright (C) 1998, 1999 Free Software Foundation, Inc.
+/* Copyright (C) 1998, 1999, 2001 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@cygnus.com>, 1998.
The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public License as
- published by the Free Software Foundation; either version 2 of the
- License, or (at your option) any later version.
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Library General Public License for more details.
+ Lesser General Public License for more details.
- You should have received a copy of the GNU Library General Public
- License along with the GNU C Library; see the file COPYING.LIB. If not,
- write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- Boston, MA 02111-1307, USA. */
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
#define SIGCONTEXT struct sigcontext
#define SIGCONTEXT_EXTRA_ARGS
#define GET_PC(ctx) ((void *) ctx.eip)
#define GET_FRAME(ctx) ((void *) ctx.ebp)
#define GET_STACK(ctx) ((void *) ctx.esp_at_signal)
+#define CALL_SIGHANDLER(handler, signo, ctx) \
+do { \
+ int __tmp1, __tmp2, __tmp3, __tmp4; \
+ __asm __volatile ("movl\t%%esp, %%edi\n\t" \
+ "andl\t$-16, %%esp\n\t" \
+ "subl\t%8, %%esp\n\t" \
+ "movl\t%%edi, %c8-4(%%esp)\n\t" \
+ "movl\t%1, 0(%%esp)\n\t" \
+ "leal\t4(%%esp), %%edi\n\t" \
+ "cld\n\t" \
+ "rep\tmovsl\n\t" \
+ "call\t*%0\n\t" \
+ "cld\n\t" \
+ "movl\t%9, %%ecx\n\t" \
+ "subl\t%%edi, %%esi\n\t" \
+ "leal\t4(%%esp,%%esi,1), %%edi\n\t" \
+ "leal\t4(%%esp), %%esi\n\t" \
+ "rep\tmovsl\n\t" \
+ "movl\t%c8-4(%%esp), %%esp\n\t" \
+ : "=a" (__tmp1), "=d" (__tmp2), "=S" (__tmp3), \
+ "=c" (__tmp4) \
+ : "0" (handler), "1" (signo), "2" (&ctx), \
+ "3" (sizeof (struct sigcontext) / 4), \
+ "n" ((sizeof (struct sigcontext) + 19) & ~15), \
+ "i" (sizeof (struct sigcontext) / 4) \
+ : "cc", "edi"); \
+} while (0)
diff --git a/libpthread/linuxthreads/sysdeps/i386/stackinfo.h b/libpthread/linuxthreads/sysdeps/i386/stackinfo.h
new file mode 100644
index 000000000..a9a6745aa
--- /dev/null
+++ b/libpthread/linuxthreads/sysdeps/i386/stackinfo.h
@@ -0,0 +1,28 @@
+/* Copyright (C) 1999 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+/* This file contains a bit of information about the stack allocation
+ of the processor. */
+
+#ifndef _STACKINFO_H
+#define _STACKINFO_H 1
+
+/* On x86 the stack grows down. */
+#define _STACK_GROWS_DOWN 1
+
+#endif /* stackinfo.h */
diff --git a/libpthread/linuxthreads/sysdeps/i386/tls.h b/libpthread/linuxthreads/sysdeps/i386/tls.h
new file mode 100644
index 000000000..5d1551e57
--- /dev/null
+++ b/libpthread/linuxthreads/sysdeps/i386/tls.h
@@ -0,0 +1,183 @@
+/* Definition for thread-local data handling. linuxthreads/i386 version.
+ Copyright (C) 2002 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#ifndef _TLS_H
+#define _TLS_H
+
+# include <pt-machine.h>
+
+#ifndef __ASSEMBLER__
+# include <stddef.h>
+
+/* Type for the dtv. */
+typedef union dtv
+{
+ size_t counter;
+ void *pointer;
+} dtv_t;
+
+
+typedef struct
+{
+ void *tcb; /* Pointer to the TCB. Not necessarily the
+ thread descriptor used by libpthread. */
+ dtv_t *dtv;
+ void *self; /* Pointer to the thread descriptor. */
+} tcbhead_t;
+#endif
+
+
+/* We can support TLS only if the floating-stack support is available. */
+#if defined FLOATING_STACKS && defined HAVE_TLS_SUPPORT
+
+/* Signal that TLS support is available. */
+//# define USE_TLS 1
+
+# ifndef __ASSEMBLER__
+/* Get system call information. */
+# include <sysdep.h>
+
+
+/* Get the thread descriptor definition. */
+# include <linuxthreads/descr.h>
+
+/* This is the size of the initial TCB. */
+# define TLS_INIT_TCB_SIZE sizeof (tcbhead_t)
+
+/* Alignment requirements for the initial TCB. */
+# define TLS_INIT_TCB_ALIGN __alignof__ (tcbhead_t)
+
+/* This is the size of the TCB. */
+# define TLS_TCB_SIZE sizeof (struct _pthread_descr_struct)
+
+/* Alignment requirements for the TCB. */
+# define TLS_TCB_ALIGN __alignof__ (struct _pthread_descr_struct)
+
+/* The TCB can have any size and the memory following the address the
+ thread pointer points to is unspecified. Allocate the TCB there. */
+# define TLS_TCB_AT_TP 1
+
+
+/* Install the dtv pointer. The pointer passed is to the element with
+ index -1 which contain the length. */
+# define INSTALL_DTV(descr, dtvp) \
+ ((tcbhead_t *) (descr))->dtv = (dtvp) + 1
+
+/* Install new dtv for current thread. */
+# define INSTALL_NEW_DTV(dtv) \
+ ({ struct _pthread_descr_struct *__descr; \
+ THREAD_SETMEM (__descr, p_header.data.dtvp, (dtv)); })
+
+/* Return dtv of given thread descriptor. */
+# define GET_DTV(descr) \
+ (((tcbhead_t *) (descr))->dtv)
+
+# ifdef __PIC__
+# define TLS_EBX_ARG "r"
+# define TLS_LOAD_EBX "xchgl %3, %%ebx\n\t"
+# else
+# define TLS_EBX_ARG "b"
+# define TLS_LOAD_EBX
+# endif
+
+# define TLS_DO_MODIFY_LDT(descr, nr) \
+({ \
+ struct modify_ldt_ldt_s ldt_entry = \
+ { nr, (unsigned long int) (descr), 0xfffff /* 4GB in pages */, \
+ 1, 0, 0, 1, 0, 1, 0 }; \
+ int result; \
+ asm volatile (TLS_LOAD_EBX \
+ "int $0x80\n\t" \
+ TLS_LOAD_EBX \
+ : "=a" (result) \
+ : "0" (__NR_modify_ldt), \
+ /* The extra argument with the "m" constraint is necessary \
+ to let the compiler know that we are accessing LDT_ENTRY \
+ here. */ \
+ "m" (ldt_entry), TLS_EBX_ARG (1), "c" (&ldt_entry), \
+ "d" (sizeof (ldt_entry))); \
+ __builtin_expect (result, 0) != 0 ? -1 : nr * 8 + 7; \
+})
+
+# define TLS_DO_SET_THREAD_AREA(descr, secondcall) \
+({ \
+ struct modify_ldt_ldt_s ldt_entry = \
+ { -1, (unsigned long int) (descr), 0xfffff /* 4GB in pages */, \
+ 1, 0, 0, 1, 0, 1, 0 }; \
+ int result; \
+ if (secondcall) \
+ ldt_entry.entry_number = ({ int _gs; \
+ asm ("movw %%gs, %w0" : "=q" (_gs)); \
+ (_gs & 0xffff) >> 3; }); \
+ asm volatile (TLS_LOAD_EBX \
+ "int $0x80\n\t" \
+ TLS_LOAD_EBX \
+ : "=a" (result), "=m" (ldt_entry.entry_number) \
+ : "0" (__NR_set_thread_area), \
+ /* The extra argument with the "m" constraint is necessary \
+ to let the compiler know that we are accessing LDT_ENTRY \
+ here. */ \
+ TLS_EBX_ARG (&ldt_entry), "m" (ldt_entry)); \
+ __builtin_expect (result, 0) == 0 ? ldt_entry.entry_number * 8 + 3 : -1; \
+})
+
+# ifdef __ASSUME_SET_THREAD_AREA_SYSCALL
+# define TLS_SETUP_GS_SEGMENT(descr, secondcall) \
+ TLS_DO_SET_THREAD_AREA (descr, secondcall)
+# elif defined __NR_set_thread_area
+# define TLS_SETUP_GS_SEGMENT(descr, secondcall) \
+ ({ int __seg = TLS_DO_SET_THREAD_AREA (descr, secondcall); \
+ __seg == -1 ? TLS_DO_MODIFY_LDT (descr, 0) : __seg; })
+# else
+# define TLS_SETUP_GS_SEGMENT(descr, secondcall) \
+ TLS_DO_MODIFY_LDT ((descr), 0)
+# endif
+
+/* Code to initially initialize the thread pointer. This might need
+ special attention since 'errno' is not yet available and if the
+ operation can cause a failure 'errno' must not be touched. */
+# define TLS_INIT_TP(descr, secondcall) \
+ ({ \
+ void *_descr = (descr); \
+ tcbhead_t *head = _descr; \
+ int __gs; \
+ \
+ head->tcb = _descr; \
+ /* For now the thread descriptor is at the same address. */ \
+ head->self = _descr; \
+ \
+ __gs = TLS_SETUP_GS_SEGMENT (_descr, secondcall); \
+ if (__builtin_expect (__gs, 7) != -1) \
+ { \
+ asm ("movw %w0, %%gs" : : "q" (__gs)); \
+ __gs = 0; \
+ } \
+ __gs; \
+ })
+
+
+/* Return the address of the dtv for the current thread. */
+# define THREAD_DTV() \
+ ({ struct _pthread_descr_struct *__descr; \
+ THREAD_GETMEM (__descr, p_header.data.dtvp); })
+
+# endif /* FLOATING_STACKS && HAVE_TLS_SUPPORT */
+#endif /* __ASSEMBLER__ */
+
+#endif /* tls.h */
diff --git a/libpthread/linuxthreads/sysdeps/i386/useldt.h b/libpthread/linuxthreads/sysdeps/i386/useldt.h
index 1a789e2e0..16aee9989 100644
--- a/libpthread/linuxthreads/sysdeps/i386/useldt.h
+++ b/libpthread/linuxthreads/sysdeps/i386/useldt.h
@@ -1,29 +1,31 @@
/* Special definitions for ix86 machine using segment register based
thread descriptor.
- Copyright (C) 1998 Free Software Foundation, Inc.
+ Copyright (C) 1998, 2000, 2001, 2002 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@cygnus.com>.
The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public License as
- published by the Free Software Foundation; either version 2 of the
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Library General Public License for more details.
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
- You should have received a copy of the GNU Library General Public
+ You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- Boston, MA 02111-1307, USA. */
+ Boston, MA 02111-1307, USA. */
+#ifndef __ASSEMBLER__
#include <stddef.h> /* For offsetof. */
+#include <stdlib.h> /* For abort(). */
-/* We don't want to include the kernel header. So duplicate the
- information. */
+/* We don't want to include the kernel header. So duplicate the
+ information. */
/* Structure passed on `modify_ldt' call. */
struct modify_ldt_ldt_s
@@ -54,28 +56,128 @@ extern int __modify_ldt (int, struct modify_ldt_ldt_s *, size_t);
({ \
register pthread_descr __self; \
__asm__ ("movl %%gs:%c1,%0" : "=r" (__self) \
- : "i" (offsetof (struct _pthread_descr_struct, p_self))); \
+ : "i" (offsetof (struct _pthread_descr_struct, \
+ p_header.data.self))); \
__self; \
})
-/* Initialize the thread-unique value. */
-#define INIT_THREAD_SELF(descr, nr) \
-{ \
+
+/* Initialize the thread-unique value. Two possible ways to do it. */
+
+#define DO_MODIFY_LDT(descr, nr) \
+({ \
struct modify_ldt_ldt_s ldt_entry = \
- { nr, (unsigned long int) descr, sizeof (*descr), 1, 0, 0, 0, 0, 1, 0 }; \
+ { nr, (unsigned long int) (descr), 0xfffff /* 4GB in pages */, \
+ 1, 0, 0, 1, 0, 1, 0 }; \
if (__modify_ldt (1, &ldt_entry, sizeof (ldt_entry)) != 0) \
abort (); \
- __asm__ __volatile__ ("movw %w0, %%gs" : : "r" (nr * 8 + 7)); \
-}
+ asm ("movw %w0, %%gs" : : "q" (nr * 8 + 7)); \
+})
+
+#ifdef __PIC__
+# define USETLS_EBX_ARG "r"
+# define USETLS_LOAD_EBX "xchgl %3, %%ebx\n\t"
+#else
+# define USETLS_EBX_ARG "b"
+# define USETLS_LOAD_EBX
+#endif
+
+/* When using the new set_thread_area call, we don't need to change %gs
+ because we inherited the value set up in the main thread by TLS setup.
+ We need to extract that value and set up the same segment in this
+ thread. */
+#if USE_TLS
+# define DO_SET_THREAD_AREA_REUSE(nr) 1
+#else
+/* Without TLS, we do the initialization of the main thread, where NR == 0. */
+# define DO_SET_THREAD_AREA_REUSE(nr) (!__builtin_constant_p (nr) || (nr))
+#endif
+#define DO_SET_THREAD_AREA(descr, nr) \
+({ \
+ int __gs; \
+ if (DO_SET_THREAD_AREA_REUSE (nr)) \
+ { \
+ asm ("movw %%gs, %w0" : "=q" (__gs)); \
+ struct modify_ldt_ldt_s ldt_entry = \
+ { (__gs & 0xffff) >> 3, \
+ (unsigned long int) (descr), 0xfffff /* 4GB in pages */, \
+ 1, 0, 0, 1, 0, 1, 0 }; \
+ \
+ int __result; \
+ __asm (USETLS_LOAD_EBX \
+ "movl %2, %%eax\n\t" \
+ "int $0x80\n\t" \
+ USETLS_LOAD_EBX \
+ : "&a" (__result) \
+ : USETLS_EBX_ARG (&ldt_entry), "i" (__NR_set_thread_area)); \
+ if (__result == 0) \
+ asm ("movw %w0, %%gs" :: "q" (__gs)); \
+ else \
+ __gs = -1; \
+ } \
+ else \
+ { \
+ struct modify_ldt_ldt_s ldt_entry = \
+ { -1, \
+ (unsigned long int) (descr), 0xfffff /* 4GB in pages */, \
+ 1, 0, 0, 1, 0, 1, 0 }; \
+ int __result; \
+ __asm (USETLS_LOAD_EBX \
+ "movl %2, %%eax\n\t" \
+ "int $0x80\n\t" \
+ USETLS_LOAD_EBX \
+ : "&a" (__result) \
+ : USETLS_EBX_ARG (&ldt_entry), "i" (__NR_set_thread_area)); \
+ if (__result == 0) \
+ { \
+ __gs = (ldt_entry.entry_number << 3) + 3; \
+ asm ("movw %w0, %%gs" : : "q" (__gs)); \
+ } \
+ else \
+ __gs = -1; \
+ } \
+ __gs; \
+})
+
+#if defined __ASSUME_SET_THREAD_AREA_SYSCALL
+# define INIT_THREAD_SELF(descr, nr) DO_SET_THREAD_AREA (descr, nr)
+#elif defined __NR_set_thread_area
+# define INIT_THREAD_SELF(descr, nr) \
+({ \
+ if (__builtin_expect (__have_no_set_thread_area, 0) \
+ || (DO_SET_THREAD_AREA (descr, DO_SET_THREAD_AREA_REUSE (nr)) == -1 \
+ && (__have_no_set_thread_area = 1))) \
+ DO_MODIFY_LDT (descr, nr); \
+})
+/* Defined in pspinlock.c. */
+extern int __have_no_set_thread_area;
+#else
+# define INIT_THREAD_SELF(descr, nr) DO_MODIFY_LDT (descr, nr)
+#endif
/* Free resources associated with thread descriptor. */
-#define FREE_THREAD_SELF(descr, nr) \
+#ifdef __ASSUME_SET_THREAD_AREA_SYSCALL
+#define FREE_THREAD(descr, nr) do { } while (0)
+#elif defined __NR_set_thread_area
+#define FREE_THREAD(descr, nr) \
+{ \
+ int __gs; \
+ __asm__ __volatile__ ("movw %%gs, %w0" : "=q" (__gs)); \
+ if (__builtin_expect (__gs & 4, 0)) \
+ { \
+ struct modify_ldt_ldt_s ldt_entry = \
+ { nr, 0, 0, 0, 0, 1, 0, 1, 0, 0 }; \
+ __modify_ldt (1, &ldt_entry, sizeof (ldt_entry)); \
+ } \
+}
+#else
+#define FREE_THREAD(descr, nr) \
{ \
struct modify_ldt_ldt_s ldt_entry = \
{ nr, 0, 0, 0, 0, 1, 0, 1, 0, 0 }; \
- __asm__ __volatile__ ("movw %w0,%%gs" : : "r" (0)); \
__modify_ldt (1, &ldt_entry, sizeof (ldt_entry)); \
}
+#endif
/* Read member of the thread descriptor directly. */
#define THREAD_GETMEM(descr, member) \
@@ -83,20 +185,28 @@ extern int __modify_ldt (int, struct modify_ldt_ldt_s *, size_t);
__typeof__ (descr->member) __value; \
if (sizeof (__value) == 1) \
__asm__ __volatile__ ("movb %%gs:%P2,%b0" \
- : "=r" (__value) \
+ : "=q" (__value) \
: "0" (0), \
"i" (offsetof (struct _pthread_descr_struct, \
member))); \
+ else if (sizeof (__value) == 4) \
+ __asm__ __volatile__ ("movl %%gs:%P1,%0" \
+ : "=r" (__value) \
+ : "i" (offsetof (struct _pthread_descr_struct, \
+ member))); \
else \
{ \
- if (sizeof (__value) != 4) \
- /* There should not be any value with a size other than 1 or 4. */ \
+ if (sizeof (__value) != 8) \
+ /* There should not be any value with a size other than 1, 4 or 8. */\
abort (); \
\
- __asm__ __volatile__ ("movl %%gs:%P1,%0" \
- : "=r" (__value) \
+ __asm__ __volatile__ ("movl %%gs:%P1,%%eax\n\t" \
+ "movl %%gs:%P2,%%edx" \
+ : "=A" (__value) \
: "i" (offsetof (struct _pthread_descr_struct, \
- member))); \
+ member)), \
+ "i" (offsetof (struct _pthread_descr_struct, \
+ member) + 4)); \
} \
__value; \
})
@@ -107,18 +217,24 @@ extern int __modify_ldt (int, struct modify_ldt_ldt_s *, size_t);
__typeof__ (descr->member) __value; \
if (sizeof (__value) == 1) \
__asm__ __volatile__ ("movb %%gs:(%2),%b0" \
- : "=r" (__value) \
+ : "=q" (__value) \
: "0" (0), \
"r" (offsetof (struct _pthread_descr_struct, \
member))); \
+ else if (sizeof (__value) == 4) \
+ __asm__ __volatile__ ("movl %%gs:(%1),%0" \
+ : "=r" (__value) \
+ : "r" (offsetof (struct _pthread_descr_struct, \
+ member))); \
else \
{ \
- if (sizeof (__value) != 4) \
- /* There should not be any value with a size other than 1 or 4. */ \
+ if (sizeof (__value) != 8) \
+ /* There should not be any value with a size other than 1, 4 or 8. */\
abort (); \
\
- __asm__ __volatile__ ("movl %%gs:(%1),%0" \
- : "=r" (__value) \
+ __asm__ __volatile__ ("movl %%gs:(%1),%%eax\n\t" \
+ "movl %%gs:4(%1),%%edx" \
+ : "=&A" (__value) \
: "r" (offsetof (struct _pthread_descr_struct, \
member))); \
} \
@@ -131,19 +247,27 @@ extern int __modify_ldt (int, struct modify_ldt_ldt_s *, size_t);
__typeof__ (descr->member) __value = (value); \
if (sizeof (__value) == 1) \
__asm__ __volatile__ ("movb %0,%%gs:%P1" : \
+ : "q" (__value), \
+ "i" (offsetof (struct _pthread_descr_struct, \
+ member))); \
+ else if (sizeof (__value) == 4) \
+ __asm__ __volatile__ ("movl %0,%%gs:%P1" : \
: "r" (__value), \
"i" (offsetof (struct _pthread_descr_struct, \
member))); \
else \
{ \
- if (sizeof (__value) != 4) \
- /* There should not be any value with a size other than 1 or 4. */ \
+ if (sizeof (__value) != 8) \
+ /* There should not be any value with a size other than 1, 4 or 8. */\
abort (); \
\
- __asm__ __volatile__ ("movl %0,%%gs:%P1" : \
- : "r" (__value), \
+ __asm__ __volatile__ ("movl %%eax,%%gs:%P1\n\n" \
+ "movl %%edx,%%gs:%P2" : \
+ : "A" (__value), \
"i" (offsetof (struct _pthread_descr_struct, \
- member))); \
+ member)), \
+ "i" (offsetof (struct _pthread_descr_struct, \
+ member) + 4)); \
} \
})
@@ -153,18 +277,31 @@ extern int __modify_ldt (int, struct modify_ldt_ldt_s *, size_t);
__typeof__ (descr->member) __value = (value); \
if (sizeof (__value) == 1) \
__asm__ __volatile__ ("movb %0,%%gs:(%1)" : \
+ : "q" (__value), \
+ "r" (offsetof (struct _pthread_descr_struct, \
+ member))); \
+ else if (sizeof (__value) == 4) \
+ __asm__ __volatile__ ("movl %0,%%gs:(%1)" : \
: "r" (__value), \
"r" (offsetof (struct _pthread_descr_struct, \
member))); \
else \
{ \
- if (sizeof (__value) != 4) \
- /* There should not be any value with a size other than 1 or 4. */ \
+ if (sizeof (__value) != 8) \
+ /* There should not be any value with a size other than 1, 4 or 8. */\
abort (); \
\
- __asm__ __volatile__ ("movl %0,%%gs:(%1)" : \
- : "r" (__value), \
+ __asm__ __volatile__ ("movl %%eax,%%gs:(%1)\n\t" \
+ "movl %%edx,%%gs:4(%1)" : \
+ : "A" (__value), \
"r" (offsetof (struct _pthread_descr_struct, \
member))); \
} \
})
+#endif
+
+/* We want the OS to assign stack addresses. */
+#define FLOATING_STACKS 1
+
+/* Maximum size of the stack if the rlimit is unlimited. */
+#define ARCH_STACK_MAX_SIZE 8*1024*1024
diff --git a/libpthread/linuxthreads/sysdeps/m68k/pt-machine.h b/libpthread/linuxthreads/sysdeps/m68k/pt-machine.h
index 644bc2f0c..161b53469 100644
--- a/libpthread/linuxthreads/sysdeps/m68k/pt-machine.h
+++ b/libpthread/linuxthreads/sysdeps/m68k/pt-machine.h
@@ -1,41 +1,41 @@
/* Machine-dependent pthreads configuration and inline functions.
m68k version.
- Copyright (C) 1996, 1998 Free Software Foundation, Inc.
+ Copyright (C) 1996, 1998, 2000, 2002 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Richard Henderson <rth@tamu.edu>.
The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public License as
- published by the Free Software Foundation; either version 2 of the
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Library General Public License for more details.
+ Lesser General Public License for more details.
- You should have received a copy of the GNU Library General Public
+ You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; see the file COPYING.LIB. If
not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+#ifndef _PT_MACHINE_H
+#define _PT_MACHINE_H 1
+
#ifndef PT_EI
# define PT_EI extern inline
#endif
+extern long int testandset (int *spinlock);
+extern int __compare_and_swap (long int *p, long int oldval, long int newval);
/* Spinlock implementation; required. */
-PT_EI int
+PT_EI long int
testandset (int *spinlock)
{
char ret;
- __asm__ __volatile__(
-#ifdef __mcf5200__
- "bset #7,%1; sne %0"
-#else
- "tas %1; sne %0"
-#endif
+ __asm__ __volatile__("tas %1; sne %0"
: "=dm"(ret), "=m"(*spinlock)
: "m"(*spinlock)
: "cc");
@@ -52,7 +52,6 @@ register char * stack_pointer __asm__ ("%sp");
/* Compare-and-swap for semaphores. */
-#ifndef __mcf5200__
#define HAS_COMPARE_AND_SWAP
PT_EI int
__compare_and_swap (long int *p, long int oldval, long int newval)
@@ -66,5 +65,5 @@ __compare_and_swap (long int *p, long int oldval, long int newval)
return ret;
}
-#endif
+#endif /* pt-machine.h */
diff --git a/libpthread/linuxthreads/sysdeps/m68k/stackinfo.h b/libpthread/linuxthreads/sysdeps/m68k/stackinfo.h
new file mode 100644
index 000000000..66e5a17fb
--- /dev/null
+++ b/libpthread/linuxthreads/sysdeps/m68k/stackinfo.h
@@ -0,0 +1,28 @@
+/* Copyright (C) 1999 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+/* This file contains a bit of information about the stack allocation
+ of the processor. */
+
+#ifndef _STACKINFO_H
+#define _STACKINFO_H 1
+
+/* On m68k the stack grows down. */
+#define _STACK_GROWS_DOWN 1
+
+#endif /* stackinfo.h */
diff --git a/libpthread/linuxthreads/sysdeps/mips/pt-machine.h b/libpthread/linuxthreads/sysdeps/mips/pt-machine.h
index c40ab9e3e..59d26740f 100644
--- a/libpthread/linuxthreads/sysdeps/mips/pt-machine.h
+++ b/libpthread/linuxthreads/sysdeps/mips/pt-machine.h
@@ -1,59 +1,44 @@
/* Machine-dependent pthreads configuration and inline functions.
- Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
+ Copyright (C) 1996, 1997, 1998, 2000, 2002 Free Software Foundation, Inc.
This file is part of the GNU C Library.
- Contributed by Ralf Baechle <ralf@gnu.ai.mit.edu>.
+ Contributed by Ralf Baechle <ralf@gnu.org>.
Based on the Alpha version by Richard Henderson <rth@tamu.edu>.
The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public License as
- published by the Free Software Foundation; either version 2 of the
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Library General Public License for more details.
+ Lesser General Public License for more details.
- You should have received a copy of the GNU Library General Public
+ You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; see the file COPYING.LIB. If
not, write to the Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
- TODO: This version makes use of MIPS ISA 2 features. It won't
- work on ISA 1. These machines will have to take the overhead of
- a sysmips(MIPS_ATOMIC_SET, ...) syscall which isn't implemented
- yet correctly. There is however a better solution for R3000
- uniprocessor machines possible. */
+#ifndef _PT_MACHINE_H
+#define _PT_MACHINE_H 1
+
+#include <sys/tas.h>
#ifndef PT_EI
# define PT_EI extern inline
#endif
+extern long int testandset (int *spinlock);
+extern int __compare_and_swap (long int *p, long int oldval, long int newval);
+
/* Spinlock implementation; required. */
+
PT_EI long int
testandset (int *spinlock)
{
- long int ret, temp;
-
- __asm__ __volatile__(
- "# Inline spinlock test & set\n\t"
- ".set\tmips2\n"
- "1:\tll\t%0,%3\n\t"
- "bnez\t%0,2f\n\t"
- ".set\tnoreorder\n\t"
- "li\t%1,1\n\t"
- ".set\treorder\n\t"
- "sc\t%1,%2\n\t"
- "beqz\t%1,1b\n"
- "2:\t.set\tmips0\n\t"
- "/* End spinlock test & set */"
- : "=&r"(ret), "=&r" (temp), "=m"(*spinlock)
- : "m"(*spinlock)
- : "memory");
-
- return ret;
+ return _test_and_set (spinlock, 1);
}
@@ -69,49 +54,27 @@ register char * stack_pointer __asm__ ("$29");
PT_EI int
__compare_and_swap (long int *p, long int oldval, long int newval)
{
- long ret;
-
- __asm__ __volatile__ (
- "/* Inline compare & swap */\n\t"
- ".set\tmips2\n"
- "1:\tll\t%0,%4\n\t"
- ".set\tnoreorder\n\t"
- "bne\t%0,%2,2f\n\t"
- "move\t%0,$0\n\t" /*[NDF] Failure case. */
- "move\t%0,%3\n\t"
- ".set\treorder\n\t"
- "sc\t%0,%1\n\t"
- "beqz\t%0,1b\n"
- "2:\t.set\tmips0\n\t"
- "/* End compare & swap */"
- : "=&r"(ret), "=m"(*p)
- : "r"(oldval), "r"(newval), "m"(*p));
-
- return ret;
-
- /*
- 1: load locked: into ret(%0), from *p(0(%4))
- branch to 2 if ret(%0) != oldval(%2)
- Delay slot: move 0 into ret(%0) // [NDF] Added
- Don't branch case:
- move newval(%3) into ret(%0)
- setcompare ret(%0) into *p(0(%1))
- branch to 1 if ret(%0) == 0 (sc failed)
- Delay slot: unknown/none
- return
-
- 2: Delay slot
- return
-
-ll a b
-Sets a to the value pointed to by address b, and "locks" b so that if
-any of a number of things are attempted that might access b then the
-next sc will fail.
-
-sc a b
-Sets the memory address pointed to by b to the value in a atomically.
-If it succeeds then a will be set to 1, if it fails a will be set to 0.
+ long int ret, temp;
- */
+ __asm__ __volatile__
+ ("/* Inline compare & swap */\n"
+ "1:\n\t"
+ ".set push\n\t"
+ ".set mips2\n\t"
+ "ll %1,%5\n\t"
+ "move %0,$0\n\t"
+ "bne %1,%3,2f\n\t"
+ "move %0,%4\n\t"
+ "sc %0,%2\n\t"
+ ".set pop\n\t"
+ "beqz %0,1b\n"
+ "2:\n\t"
+ "/* End compare & swap */"
+ : "=&r" (ret), "=&r" (temp), "=m" (*p)
+ : "r" (oldval), "r" (newval), "m" (*p)
+ : "memory");
+ return ret;
}
+
+#endif /* pt-machine.h */
diff --git a/libpthread/linuxthreads/sysdeps/mips/stackinfo.h b/libpthread/linuxthreads/sysdeps/mips/stackinfo.h
new file mode 100644
index 000000000..86e3d621b
--- /dev/null
+++ b/libpthread/linuxthreads/sysdeps/mips/stackinfo.h
@@ -0,0 +1,28 @@
+/* Copyright (C) 2000 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+/* This file contains a bit of information about the stack allocation
+ of the processor. */
+
+#ifndef _STACKINFO_H
+#define _STACKINFO_H 1
+
+/* On MIPS the stack grows down. */
+#define _STACK_GROWS_DOWN 1
+
+#endif /* stackinfo.h */
diff --git a/libpthread/linuxthreads/sysdeps/powerpc/pt-machine.h b/libpthread/linuxthreads/sysdeps/powerpc/pt-machine.h
index 578369a7f..19b77b7e6 100644
--- a/libpthread/linuxthreads/sysdeps/powerpc/pt-machine.h
+++ b/libpthread/linuxthreads/sysdeps/powerpc/pt-machine.h
@@ -1,19 +1,19 @@
/* Machine-dependent pthreads configuration and inline functions.
powerpc version.
- Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
+ Copyright (C) 1996, 1997, 1998, 2000, 2001, 2002 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public License as
- published by the Free Software Foundation; either version 2 of the
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Library General Public License for more details.
+ Lesser General Public License for more details.
- You should have received a copy of the GNU Library General Public
+ You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; see the file COPYING.LIB. If
not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
@@ -21,18 +21,20 @@
/* These routines are from Appendix G of the 'PowerPC 601 RISC Microprocessor
User's Manual', by IBM and Motorola. */
+#ifndef _PT_MACHINE_H
+#define _PT_MACHINE_H 1
+
#ifndef PT_EI
# define PT_EI extern inline
#endif
+extern long int testandset (int *spinlock);
+extern int __compare_and_swap (long int *p, long int oldval, long int newval);
+
/* For multiprocessor systems, we want to ensure all memory accesses
- are completed before we reset a lock. */
-#if 0
-/* on non multiprocessor systems, you can just: */
-#define sync() /* nothing */
-#else
-#define sync() __asm__ __volatile__ ("sync")
-#endif
+ are completed before we reset a lock. On other systems, we still
+ need to make sure that the compiler has flushed everything to memory. */
+#define MEMORY_BARRIER() __asm__ __volatile__ ("sync" : : : "memory")
/* Get some notion of the current stack. Need not be exactly the top
of the stack, just something somewhere in the current frame. */
@@ -40,30 +42,52 @@
register char * stack_pointer __asm__ ("r1");
/* Compare-and-swap for semaphores. */
-/* note that test-and-set(x) is the same as compare-and-swap(x, 0, 1) */
+/* note that test-and-set(x) is the same as !compare-and-swap(x, 0, 1) */
-#define HAS_COMPARE_AND_SWAP
-#if BROKEN_PPC_ASM_CR0
-static
-#else
-PT_EI
-#endif
-int
+#define HAS_COMPARE_AND_SWAP_WITH_RELEASE_SEMANTICS
+#define IMPLEMENT_TAS_WITH_CAS
+
+PT_EI int
__compare_and_swap (long int *p, long int oldval, long int newval)
{
int ret;
- sync();
- __asm__ __volatile__(
- "0: lwarx %0,0,%1 ;"
- " xor. %0,%3,%0;"
- " bne 1f;"
- " stwcx. %2,0,%1;"
- " bne- 0b;"
- "1: "
+ __asm__ __volatile__ (
+ "0: lwarx %0,0,%1 ;"
+ " xor. %0,%3,%0;"
+ " bne 1f;"
+ " stwcx. %2,0,%1;"
+ " bne- 0b;"
+ "1: "
: "=&r"(ret)
: "r"(p), "r"(newval), "r"(oldval)
: "cr0", "memory");
- sync();
+ /* This version of __compare_and_swap is to be used when acquiring
+ a lock, so we don't need to worry about whether other memory
+ operations have completed, but we do need to be sure that any loads
+ after this point really occur after we have acquired the lock. */
+ __asm__ __volatile__ ("isync" : : : "memory");
return ret == 0;
}
+
+PT_EI int
+__compare_and_swap_with_release_semantics (long int *p,
+ long int oldval, long int newval)
+{
+ int ret;
+
+ MEMORY_BARRIER ();
+ __asm__ __volatile__ (
+ "0: lwarx %0,0,%1 ;"
+ " xor. %0,%3,%0;"
+ " bne 1f;"
+ " stwcx. %2,0,%1;"
+ " bne- 0b;"
+ "1: "
+ : "=&r"(ret)
+ : "r"(p), "r"(newval), "r"(oldval)
+ : "cr0", "memory");
+ return ret == 0;
+}
+
+#endif /* pt-machine.h */
diff --git a/libpthread/linuxthreads/sysdeps/powerpc/stackinfo.h b/libpthread/linuxthreads/sysdeps/powerpc/stackinfo.h
new file mode 100644
index 000000000..839758a4e
--- /dev/null
+++ b/libpthread/linuxthreads/sysdeps/powerpc/stackinfo.h
@@ -0,0 +1,28 @@
+/* Copyright (C) 1999 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+/* This file contains a bit of information about the stack allocation
+ of the processor. */
+
+#ifndef _STACKINFO_H
+#define _STACKINFO_H 1
+
+/* On PPC the stack grows down. */
+#define _STACK_GROWS_DOWN 1
+
+#endif /* stackinfo.h */
diff --git a/libpthread/linuxthreads/sysdeps/pthread/bits/libc-lock.h b/libpthread/linuxthreads/sysdeps/pthread/bits/libc-lock.h
new file mode 100644
index 000000000..4e884a030
--- /dev/null
+++ b/libpthread/linuxthreads/sysdeps/pthread/bits/libc-lock.h
@@ -0,0 +1,327 @@
+/* libc-internal interface for mutex locks. LinuxThreads version.
+ Copyright (C) 1996,1997,1998,1999,2000,2001 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If not,
+ write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+#ifndef _BITS_LIBC_LOCK_H
+#define _BITS_LIBC_LOCK_H 1
+
+#include <pthread.h>
+
+/* Mutex type. */
+#if defined(_LIBC) || defined(_IO_MTSAFE_IO)
+typedef pthread_mutex_t __libc_lock_t;
+typedef struct { pthread_mutex_t mutex; } __libc_lock_recursive_t;
+# ifdef __USE_UNIX98
+typedef pthread_rwlock_t __libc_rwlock_t;
+# else
+typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
+# endif
+#else
+typedef struct __libc_lock_opaque__ __libc_lock_t;
+typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
+typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
+#endif
+
+/* Type for key to thread-specific data. */
+typedef pthread_key_t __libc_key_t;
+
+/* Define a lock variable NAME with storage class CLASS. The lock must be
+ initialized with __libc_lock_init before it can be used (or define it
+ with __libc_lock_define_initialized, below). Use `extern' for CLASS to
+ declare a lock defined in another module. In public structure
+ definitions you must use a pointer to the lock structure (i.e., NAME
+ begins with a `*'), because its storage size will not be known outside
+ of libc. */
+#define __libc_lock_define(CLASS,NAME) \
+ CLASS __libc_lock_t NAME;
+#define __libc_rwlock_define(CLASS,NAME) \
+ CLASS __libc_rwlock_t NAME;
+#define __libc_lock_define_recursive(CLASS,NAME) \
+ CLASS __libc_lock_recursive_t NAME;
+
+/* Define an initialized lock variable NAME with storage class CLASS.
+
+ For the C library we take a deeper look at the initializer. For
+ this implementation all fields are initialized to zero. Therefore
+ we don't initialize the variable which allows putting it into the
+ BSS section. (Except on PA-RISC and other odd architectures, where
+ initialized locks must be set to one due to the lack of normal
+ atomic operations.) */
+
+#if __LT_SPINLOCK_INIT == 0
+# define __libc_lock_define_initialized(CLASS,NAME) \
+ CLASS __libc_lock_t NAME;
+#else
+# define __libc_lock_define_initialized(CLASS,NAME) \
+ CLASS __libc_lock_t NAME = PTHREAD_MUTEX_INITIALIZER;
+#endif
+
+#define __libc_rwlock_define_initialized(CLASS,NAME) \
+ CLASS __libc_rwlock_t NAME = PTHREAD_RWLOCK_INITIALIZER;
+
+/* Define an initialized recursive lock variable NAME with storage
+ class CLASS. */
+#define __libc_lock_define_initialized_recursive(CLASS,NAME) \
+ CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
+#define _LIBC_LOCK_RECURSIVE_INITIALIZER \
+ {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
+
+/* Initialize the named lock variable, leaving it in a consistent, unlocked
+ state. */
+#define __libc_lock_init(NAME) \
+ (__pthread_mutex_init != NULL ? __pthread_mutex_init (&(NAME), NULL) : 0);
+#define __libc_rwlock_init(NAME) \
+ (__pthread_rwlock_init != NULL ? __pthread_rwlock_init (&(NAME), NULL) : 0);
+
+/* Same as last but this time we initialize a recursive mutex. */
+#define __libc_lock_init_recursive(NAME) \
+ do { \
+ if (__pthread_mutex_init != NULL) \
+ { \
+ pthread_mutexattr_t __attr; \
+ __pthread_mutexattr_init (&__attr); \
+ __pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP); \
+ __pthread_mutex_init (&(NAME).mutex, &__attr); \
+ __pthread_mutexattr_destroy (&__attr); \
+ } \
+ } while (0);
+
+/* Finalize the named lock variable, which must be locked. It cannot be
+ used again until __libc_lock_init is called again on it. This must be
+ called on a lock variable before the containing storage is reused. */
+#define __libc_lock_fini(NAME) \
+ (__pthread_mutex_destroy != NULL ? __pthread_mutex_destroy (&(NAME)) : 0);
+#define __libc_rwlock_fini(NAME) \
+ (__pthread_rwlock_destroy != NULL ? __pthread_rwlock_destroy (&(NAME)) : 0);
+
+/* Finalize recursive named lock. */
+#define __libc_lock_fini_recursive(NAME) __libc_lock_fini ((NAME).mutex)
+
+/* Lock the named lock variable. */
+#define __libc_lock_lock(NAME) \
+ (__pthread_mutex_lock != NULL ? __pthread_mutex_lock (&(NAME)) : 0);
+#define __libc_rwlock_rdlock(NAME) \
+ (__pthread_rwlock_rdlock != NULL ? __pthread_rwlock_rdlock (&(NAME)) : 0);
+#define __libc_rwlock_wrlock(NAME) \
+ (__pthread_rwlock_wrlock != NULL ? __pthread_rwlock_wrlock (&(NAME)) : 0);
+
+/* Lock the recursive named lock variable. */
+#define __libc_lock_lock_recursive(NAME) __libc_lock_lock ((NAME).mutex)
+
+/* Try to lock the named lock variable. */
+#define __libc_lock_trylock(NAME) \
+ (__pthread_mutex_trylock != NULL ? __pthread_mutex_trylock (&(NAME)) : 0)
+#define __libc_rwlock_tryrdlock(NAME) \
+ (__pthread_rwlock_tryrdlock != NULL \
+ ? __pthread_rwlock_tryrdlock (&(NAME)) : 0)
+#define __libc_rwlock_trywrlock(NAME) \
+ (__pthread_rwlock_trywrlock != NULL \
+ ? __pthread_rwlock_trywrlock (&(NAME)) : 0)
+
+/* Try to lock the recursive named lock variable. */
+#define __libc_lock_trylock_recursive(NAME) __libc_lock_trylock ((NAME).mutex)
+
+/* Unlock the named lock variable. */
+#define __libc_lock_unlock(NAME) \
+ (__pthread_mutex_unlock != NULL ? __pthread_mutex_unlock (&(NAME)) : 0);
+#define __libc_rwlock_unlock(NAME) \
+ (__pthread_rwlock_unlock != NULL ? __pthread_rwlock_unlock (&(NAME)) : 0);
+
+/* Unlock the recursive named lock variable. */
+#define __libc_lock_unlock_recursive(NAME) __libc_lock_unlock ((NAME).mutex)
+
+
+/* Define once control variable. */
+#if PTHREAD_ONCE_INIT == 0
+/* Special case for static variables where we can avoid the initialization
+ if it is zero. */
+# define __libc_once_define(CLASS, NAME) \
+ CLASS pthread_once_t NAME
+#else
+# define __libc_once_define(CLASS, NAME) \
+ CLASS pthread_once_t NAME = PTHREAD_ONCE_INIT
+#endif
+
+/* Call handler iff the first call. */
+#define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
+ do { \
+ if (__pthread_once != NULL) \
+ __pthread_once (&(ONCE_CONTROL), (INIT_FUNCTION)); \
+ else if ((ONCE_CONTROL) == PTHREAD_ONCE_INIT) { \
+ INIT_FUNCTION (); \
+ (ONCE_CONTROL) = !PTHREAD_ONCE_INIT; \
+ } \
+ } while (0)
+
+
+/* Start critical region with cleanup. */
+#define __libc_cleanup_region_start(DOIT, FCT, ARG) \
+ { struct _pthread_cleanup_buffer _buffer; \
+ int _avail = (DOIT) && _pthread_cleanup_push_defer != NULL; \
+ if (_avail) { \
+ _pthread_cleanup_push_defer (&_buffer, (FCT), (ARG)); \
+ }
+
+/* End critical region with cleanup. */
+#define __libc_cleanup_region_end(DOIT) \
+ if (_avail) { \
+ _pthread_cleanup_pop_restore (&_buffer, (DOIT)); \
+ } \
+ }
+
+/* Sometimes we have to exit the block in the middle. */
+#define __libc_cleanup_end(DOIT) \
+ if (_avail) { \
+ _pthread_cleanup_pop_restore (&_buffer, (DOIT)); \
+ }
+
+/* Create thread-specific key. */
+#define __libc_key_create(KEY, DESTRUCTOR) \
+ (__pthread_key_create != NULL ? __pthread_key_create (KEY, DESTRUCTOR) : 1)
+
+/* Get thread-specific data. */
+#define __libc_getspecific(KEY) \
+ (__pthread_getspecific != NULL ? __pthread_getspecific (KEY) : NULL)
+
+/* Set thread-specific data. */
+#define __libc_setspecific(KEY, VALUE) \
+ (__pthread_setspecific != NULL ? __pthread_setspecific (KEY, VALUE) : 0)
+
+
+/* Register handlers to execute before and after `fork'. */
+#define __libc_atfork(PREPARE, PARENT, CHILD) \
+ (__pthread_atfork != NULL ? __pthread_atfork (PREPARE, PARENT, CHILD) : 0)
+
+/* Functions that are used by this file and are internal to the GNU C
+ library. */
+
+extern int __pthread_mutex_init (pthread_mutex_t *__mutex,
+ __const pthread_mutexattr_t *__mutex_attr);
+
+extern int __pthread_mutex_destroy (pthread_mutex_t *__mutex);
+
+extern int __pthread_mutex_trylock (pthread_mutex_t *__mutex);
+
+extern int __pthread_mutex_lock (pthread_mutex_t *__mutex);
+
+extern int __pthread_mutex_unlock (pthread_mutex_t *__mutex);
+
+extern int __pthread_mutexattr_init (pthread_mutexattr_t *__attr);
+
+extern int __pthread_mutexattr_destroy (pthread_mutexattr_t *__attr);
+
+extern int __pthread_mutexattr_settype (pthread_mutexattr_t *__attr,
+ int __kind);
+
+#ifdef __USE_UNIX98
+extern int __pthread_rwlock_init (pthread_rwlock_t *__rwlock,
+ __const pthread_rwlockattr_t *__attr);
+
+extern int __pthread_rwlock_destroy (pthread_rwlock_t *__rwlock);
+
+extern int __pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock);
+
+extern int __pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock);
+
+extern int __pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock);
+
+extern int __pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock);
+
+extern int __pthread_rwlock_unlock (pthread_rwlock_t *__rwlock);
+#endif
+
+extern int __pthread_key_create (pthread_key_t *__key,
+ void (*__destr_function) (void *));
+
+extern int __pthread_setspecific (pthread_key_t __key,
+ __const void *__pointer);
+
+extern void *__pthread_getspecific (pthread_key_t __key);
+
+extern int __pthread_once (pthread_once_t *__once_control,
+ void (*__init_routine) (void));
+
+extern int __pthread_atfork (void (*__prepare) (void),
+ void (*__parent) (void),
+ void (*__child) (void));
+
+
+
+/* Make the pthread functions weak so that we can elide them from
+ single-threaded processes. */
+#ifndef __NO_WEAK_PTHREAD_ALIASES
+# ifdef weak_extern
+# if _LIBC
+# include <bp-sym.h>
+# else
+# define BP_SYM(sym) sym
+# endif
+weak_extern (BP_SYM (__pthread_mutex_init))
+weak_extern (BP_SYM (__pthread_mutex_destroy))
+weak_extern (BP_SYM (__pthread_mutex_lock))
+weak_extern (BP_SYM (__pthread_mutex_trylock))
+weak_extern (BP_SYM (__pthread_mutex_unlock))
+weak_extern (BP_SYM (__pthread_mutexattr_init))
+weak_extern (BP_SYM (__pthread_mutexattr_destroy))
+weak_extern (BP_SYM (__pthread_mutexattr_settype))
+weak_extern (BP_SYM (__pthread_rwlock_init))
+weak_extern (BP_SYM (__pthread_rwlock_destroy))
+weak_extern (BP_SYM (__pthread_rwlock_rdlock))
+weak_extern (BP_SYM (__pthread_rwlock_tryrdlock))
+weak_extern (BP_SYM (__pthread_rwlock_wrlock))
+weak_extern (BP_SYM (__pthread_rwlock_trywrlock))
+weak_extern (BP_SYM (__pthread_rwlock_unlock))
+weak_extern (BP_SYM (__pthread_key_create))
+weak_extern (BP_SYM (__pthread_setspecific))
+weak_extern (BP_SYM (__pthread_getspecific))
+weak_extern (BP_SYM (__pthread_once))
+weak_extern (__pthread_initialize)
+weak_extern (__pthread_atfork)
+weak_extern (BP_SYM (_pthread_cleanup_push_defer))
+weak_extern (BP_SYM (_pthread_cleanup_pop_restore))
+# else
+# pragma weak __pthread_mutex_init
+# pragma weak __pthread_mutex_destroy
+# pragma weak __pthread_mutex_lock
+# pragma weak __pthread_mutex_trylock
+# pragma weak __pthread_mutex_unlock
+# pragma weak __pthread_mutexattr_init
+# pragma weak __pthread_mutexattr_destroy
+# pragma weak __pthread_mutexattr_settype
+# pragma weak __pthread_rwlock_destroy
+# pragma weak __pthread_rwlock_rdlock
+# pragma weak __pthread_rwlock_tryrdlock
+# pragma weak __pthread_rwlock_wrlock
+# pragma weak __pthread_rwlock_trywrlock
+# pragma weak __pthread_rwlock_unlock
+# pragma weak __pthread_key_create
+# pragma weak __pthread_setspecific
+# pragma weak __pthread_getspecific
+# pragma weak __pthread_once
+# pragma weak __pthread_initialize
+# pragma weak __pthread_atfork
+# pragma weak _pthread_cleanup_push_defer
+# pragma weak _pthread_cleanup_pop_restore
+# endif
+#endif
+
+/* We need portable names for some functions. E.g., when they are
+ used as argument to __libc_cleanup_region_start. */
+#define __libc_mutex_unlock __pthread_mutex_unlock
+
+#endif /* bits/libc-lock.h */
diff --git a/libpthread/linuxthreads/sysdeps/pthread/bits/libc-tsd.h b/libpthread/linuxthreads/sysdeps/pthread/bits/libc-tsd.h
index ca53b94d2..35a6a19a6 100644
--- a/libpthread/linuxthreads/sysdeps/pthread/bits/libc-tsd.h
+++ b/libpthread/linuxthreads/sysdeps/pthread/bits/libc-tsd.h
@@ -1,18 +1,18 @@
/* libc-internal interface for thread-specific data. LinuxThreads version.
- Copyright (C) 1997, 1998, 1999, 2001 Free Software Foundation, Inc.
+ Copyright (C) 1997,98,99,2001,02 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public License as
- published by the Free Software Foundation; either version 2 of the
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Library General Public License for more details.
+ Lesser General Public License for more details.
- You should have received a copy of the GNU Library General Public
+ You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
@@ -20,17 +20,36 @@
#ifndef _BITS_LIBC_TSD_H
#define _BITS_LIBC_TSD_H 1
-#include <features.h>
-
/* Fast thread-specific data internal to libc. */
enum __libc_tsd_key_t { _LIBC_TSD_KEY_MALLOC = 0,
_LIBC_TSD_KEY_DL_ERROR,
_LIBC_TSD_KEY_RPC_VARS,
+ _LIBC_TSD_KEY_LOCALE,
+ _LIBC_TSD_KEY_CTYPE_B,
+ _LIBC_TSD_KEY_CTYPE_TOLOWER,
+ _LIBC_TSD_KEY_CTYPE_TOUPPER,
_LIBC_TSD_KEY_N };
+#include <sys/cdefs.h>
+#include <tls.h>
+
+#if USE_TLS && HAVE___THREAD
+
+/* When __thread works, the generic definition is what we want. */
+# include <sysdeps/generic/bits/libc-tsd.h>
+
+#else
+
extern void *(*__libc_internal_tsd_get) (enum __libc_tsd_key_t) __THROW;
extern int (*__libc_internal_tsd_set) (enum __libc_tsd_key_t,
__const void *) __THROW;
+extern void **(*const __libc_internal_tsd_address) (enum __libc_tsd_key_t)
+ __THROW __attribute__ ((__const__));
+
+#define __libc_tsd_address(KEY) \
+ (__libc_internal_tsd_address != NULL \
+ ? __libc_internal_tsd_address (_LIBC_TSD_KEY_##KEY) \
+ : &__libc_tsd_##KEY##_data)
#define __libc_tsd_define(CLASS, KEY) CLASS void *__libc_tsd_##KEY##_data;
#define __libc_tsd_get(KEY) \
@@ -42,4 +61,6 @@ extern int (*__libc_internal_tsd_set) (enum __libc_tsd_key_t,
? __libc_internal_tsd_set (_LIBC_TSD_KEY_##KEY, (VALUE)) \
: ((__libc_tsd_##KEY##_data = (VALUE)), 0))
+#endif
+
#endif /* bits/libc-tsd.h */
diff --git a/libpthread/linuxthreads/sysdeps/pthread/tls.h b/libpthread/linuxthreads/sysdeps/pthread/tls.h
new file mode 100644
index 000000000..6a23ec05e
--- /dev/null
+++ b/libpthread/linuxthreads/sysdeps/pthread/tls.h
@@ -0,0 +1,81 @@
+/* Definition for thread-local data handling. Generic version.
+ Copyright (C) 2002 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+/* By default no TLS support is available. This is signaled by the
+ absence of the symbol USE_TLS. */
+#undef USE_TLS
+
+
+/* An architecture-specific version of this file has to define a
+ number of symbols:
+
+ TLS_TCB_AT_TP or TLS_DTV_AT_TP
+
+ The presence of one of these symbols signals which variant of
+ the TLS ABI is used. There are at the moment two variants
+ available:
+
+ * the thread pointer points to a thread control block
+
+ * the thread pointer points to the dynamic thread vector
+
+
+ TLS_TCB_SIZE
+
+ This is the size of the thread control block structure. How
+ this is actually defined depends on the ABI. The thread control
+ block could be internal descriptor of the thread library or
+ just a data structure which allows finding the DTV.
+
+ TLS_INIT_TCB_SIZE
+
+ Similarly, but this value is only used at startup and in the
+ dynamic linker itself. There are no threads in use at that time.
+
+
+ TLS_TCB_ALIGN
+
+ Alignment requirements for the TCB structure.
+
+ TLS_INIT_TCB_ALIGN
+
+ Similarly, but for the structure used at startup time.
+
+
+ INSTALL_DTV(tcb, init_dtv)
+
+ This macro must install the given initial DTV into the thread control
+ block TCB. The normal runtime functionality must then be able to
+ use the value.
+
+
+ TLS_INIT_TP(tcb, firstcall)
+
+ This macro must initialize the thread pointer to enable normal TLS
+ operation. The first parameter is a pointer to the thread control
+ block. The second parameter specifies whether this is the first
+ call for the TCB. ld.so calls this macro more than once.
+
+
+ THREAD_DTV()
+
+ This macro returns the address of the DTV of the current thread.
+ This normally is done using the thread register which points
+ to the dtv or the TCB (from which the DTV can be found).
+ */
diff --git a/libpthread/linuxthreads/sysdeps/sh/pt-machine.h b/libpthread/linuxthreads/sysdeps/sh/pt-machine.h
index 7287dc936..cc3a4f2ce 100644
--- a/libpthread/linuxthreads/sysdeps/sh/pt-machine.h
+++ b/libpthread/linuxthreads/sysdeps/sh/pt-machine.h
@@ -1,28 +1,35 @@
/* Machine-dependent pthreads configuration and inline functions.
SuperH version.
- Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.
+ Copyright (C) 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Niibe Yutaka <gniibe@m17n.org>.
The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public License as
- published by the Free Software Foundation; either version 2 of the
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Library General Public License for more details.
+ Lesser General Public License for more details.
- You should have received a copy of the GNU Library General Public
+ You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
+#ifndef _PT_MACHINE_H
+#define _PT_MACHINE_H 1
+
+#ifndef __ASSEMBLER__
#ifndef PT_EI
# define PT_EI extern inline
#endif
+extern long int testandset (int *spinlock);
+extern int __compare_and_swap (long int *p, long int oldval, long int newval);
+
/* Spinlock implementation; required. */
PT_EI long int
testandset (int *spinlock)
@@ -39,6 +46,11 @@ testandset (int *spinlock)
return (ret == 0);
}
+/* We want the OS to assign stack addresses. */
+#define FLOATING_STACKS 1
+
+/* Maximum size of the stack if the rlimit is unlimited. */
+#define ARCH_STACK_MAX_SIZE 32*1024*1024
/* Get some notion of the current stack. Need not be exactly the top
of the stack, just something somewhere in the current frame. */
@@ -53,4 +65,13 @@ struct _pthread_descr_struct;
/* Initialize the thread-unique value. */
#define INIT_THREAD_SELF(descr, nr) \
- ({ __asm__("ldc %0,gbr" : : "r" (descr));})
+ ({ __asm__ __volatile__("ldc %0,gbr" : : "r" (descr));})
+
+/* Access to data in the thread descriptor is easy. */
+#define THREAD_GETMEM(descr, member) THREAD_SELF->member
+#define THREAD_GETMEM_NC(descr, member) THREAD_SELF->member
+#define THREAD_SETMEM(descr, member, value) THREAD_SELF->member = (value)
+#define THREAD_SETMEM_NC(descr, member, value) THREAD_SELF->member = (value)
+#endif /* __ASSEMBLER__ */
+
+#endif /* pt-machine.h */
diff --git a/libpthread/linuxthreads/sysdeps/sh/stackinfo.h b/libpthread/linuxthreads/sysdeps/sh/stackinfo.h
new file mode 100644
index 000000000..e65338f25
--- /dev/null
+++ b/libpthread/linuxthreads/sysdeps/sh/stackinfo.h
@@ -0,0 +1,28 @@
+/* Copyright (C) 2001 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+/* This file contains a bit of information about the stack allocation
+ of the processor. */
+
+#ifndef _STACKINFO_H
+#define _STACKINFO_H 1
+
+/* On SH the stack grows down. */
+#define _STACK_GROWS_DOWN 1
+
+#endif /* stackinfo.h */
diff --git a/libpthread/linuxthreads/sysdeps/sh/tls.h b/libpthread/linuxthreads/sysdeps/sh/tls.h
new file mode 100644
index 000000000..ee3db5ae8
--- /dev/null
+++ b/libpthread/linuxthreads/sysdeps/sh/tls.h
@@ -0,0 +1,115 @@
+/* Definition for thread-local data handling. linuxthreads/SH version.
+ Copyright (C) 2002 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#ifndef _TLS_H
+#define _TLS_H
+
+#ifndef __ASSEMBLER__
+#include <stddef.h>
+
+#include <pt-machine.h>
+
+/* Type for the dtv. */
+typedef union dtv
+{
+ size_t counter;
+ void *pointer;
+} dtv_t;
+
+
+typedef struct
+{
+ void *tcb; /* Pointer to the TCB. Not necessarily the
+ thread descriptor used by libpthread. */
+ dtv_t *dtv;
+ void *self; /* Pointer to the thread descriptor. */
+} tcbhead_t;
+
+
+/* We can support TLS only if the floating-stack support is available. */
+#if defined FLOATING_STACKS && defined HAVE_TLS_SUPPORT
+
+/* Get system call information. */
+# include <sysdep.h>
+
+/* Signal that TLS support is available. */
+//# define USE_TLS 1
+
+
+/* Get the thread descriptor definition. */
+# include <linuxthreads/descr.h>
+
+/* This is the size of the initial TCB. */
+# define TLS_INIT_TCB_SIZE sizeof (tcbhead_t)
+
+/* Alignment requirements for the initial TCB. */
+# define TLS_INIT_TCB_ALIGN __alignof__ (tcbhead_t)
+
+/* This is the size of the TCB. */
+# define TLS_TCB_SIZE sizeof (struct _pthread_descr_struct)
+
+/* Alignment requirements for the TCB. */
+# define TLS_TCB_ALIGN __alignof__ (struct _pthread_descr_struct)
+
+/* The TLS blocks start right after the TCB. */
+# define TLS_DTV_AT_TP 1
+
+
+/* Install the dtv pointer. The pointer passed is to the element with
+ index -1 which contain the length. */
+# define INSTALL_DTV(descr, dtvp) \
+ ((tcbhead_t *) (descr))->dtv = dtvp + 1
+
+/* Install new dtv for current thread. */
+# define INSTALL_NEW_DTV(dtv) \
+ ({ struct _pthread_descr_struct *__descr; \
+ THREAD_SETMEM (__descr, p_header.data.dtvp, (dtv)); })
+
+/* Return dtv of given thread descriptor. */
+# define GET_DTV(descr) \
+ (((tcbhead_t *) (descr))->dtv)
+
+/* Code to initially initialize the thread pointer. This might need
+ special attention since 'errno' is not yet available and if the
+ operation can cause a failure 'errno' must not be touched. */
+# define TLS_INIT_TP(descr, secondcall) \
+ ({ \
+ void *_descr = (descr); \
+ int result; \
+ tcbhead_t *head = _descr; \
+ \
+ head->tcb = _descr; \
+ /* For now the thread descriptor is at the same address. */ \
+ head->self = _descr; \
+ \
+ asm ("ldc %0,gbr" : : "r" (_descr)); \
+ \
+ 0; \
+ })
+
+
+/* Return the address of the dtv for the current thread. */
+# define THREAD_DTV() \
+ ({ struct _pthread_descr_struct *__descr; \
+ THREAD_GETMEM (__descr, p_header.data.dtvp); })
+
+#endif /* FLOATING_STACKS && HAVE_TLS_SUPPORT */
+#endif /* __ASSEMBLER__ */
+
+#endif /* tls.h */
diff --git a/libpthread/linuxthreads/sysdeps/sparc/stackinfo.h b/libpthread/linuxthreads/sysdeps/sparc/stackinfo.h
new file mode 100644
index 000000000..fd34e2deb
--- /dev/null
+++ b/libpthread/linuxthreads/sysdeps/sparc/stackinfo.h
@@ -0,0 +1,28 @@
+/* Copyright (C) 2001 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+/* This file contains a bit of information about the stack allocation
+ of the processor. */
+
+#ifndef _STACKINFO_H
+#define _STACKINFO_H 1
+
+/* On sparc the stack grows down. */
+#define _STACK_GROWS_DOWN 1
+
+#endif /* stackinfo.h */