path: root/libpthread/linuxthreads/internals.h
author     Mike Frysinger <vapier@gentoo.org>   2005-11-15 03:02:51 +0000
committer  Mike Frysinger <vapier@gentoo.org>   2005-11-15 03:02:51 +0000
commit     5a69eba90b5b961ceb723d816ce8401582980477 (patch)
tree       7d9cdbc2e9199c5f5dfc7ab34df0c89f18d38961 /libpthread/linuxthreads/internals.h
parent     87601389ad2fbd9e0655ae0a391bf7edfdb771c2 (diff)
Revert linuxthreads to pre rev 11377 (i.e. before the massive attempt to import glibc updates) while keeping the few bugfixes ... the idea is to keep both the old and new linuxthreads around so we can hack on the new version while delivering the old stable version to end users.
Diffstat (limited to 'libpthread/linuxthreads/internals.h')
-rw-r--r--   libpthread/linuxthreads/internals.h   287
1 file changed, 229 insertions, 58 deletions
diff --git a/libpthread/linuxthreads/internals.h b/libpthread/linuxthreads/internals.h
index 98dcf4db2..50a4d2d59 100644
--- a/libpthread/linuxthreads/internals.h
+++ b/libpthread/linuxthreads/internals.h
@@ -13,28 +13,24 @@
/* GNU Library General Public License for more details. */
#ifndef _INTERNALS_H
-#define _INTERNALS_H 1
+#define _INTERNALS_H 1
/* Internal data structures */
/* Includes */
-#include <features.h>
#include <bits/libc-tsd.h> /* for _LIBC_TSD_KEY_N */
#include <limits.h>
#include <setjmp.h>
#include <signal.h>
#include <unistd.h>
-#include <bits/stackinfo.h>
#include <sys/types.h>
#include "pt-machine.h"
-#include <ucontext.h>
-#include <bits/sigcontextinfo.h>
#include "semaphore.h"
+#include "../linuxthreads_db/thread_dbP.h"
#ifdef __UCLIBC_HAS_XLOCALE__
#include <bits/uClibc_locale.h>
#endif /* __UCLIBC_HAS_XLOCALE__ */
-#include "descr.h"
/* Use a funky version in a probably vain attempt at preventing gdb
* from dlopen()'ing glibc's libthread_db library... */
@@ -55,14 +51,30 @@
# define THREAD_SETMEM_NC(descr, member, value) descr->member = (value)
#endif
-#if !defined NOT_IN_libc && defined FLOATING_STACKS
-# define LIBC_THREAD_GETMEM(descr, member) THREAD_GETMEM (descr, member)
-# define LIBC_THREAD_SETMEM(descr, member, value) \
- THREAD_SETMEM (descr, member, value)
-#else
-# define LIBC_THREAD_GETMEM(descr, member) descr->member
-# define LIBC_THREAD_SETMEM(descr, member, value) descr->member = (value)
-#endif
+/* Arguments passed to thread creation routine */
+
+struct pthread_start_args {
+ void * (*start_routine)(void *); /* function to run */
+ void * arg; /* its argument */
+ sigset_t mask; /* initial signal mask for thread */
+ int schedpolicy; /* initial scheduling policy (if any) */
+ struct sched_param schedparam; /* initial scheduling parameters (if any) */
+};
+
+
+/* We keep thread specific data in a special data structure, a two-level
+ array. The top-level array contains pointers to dynamically allocated
+ arrays of a certain number of data pointers. So we can implement a
+ sparse array. Each dynamic second-level array has
+ PTHREAD_KEY_2NDLEVEL_SIZE
+ entries. This value shouldn't be too large. */
+#define PTHREAD_KEY_2NDLEVEL_SIZE 32
+
+/* We need to address PTHREAD_KEYS_MAX keys with PTHREAD_KEY_2NDLEVEL_SIZE
+ keys in each subarray. */
+#define PTHREAD_KEY_1STLEVEL_SIZE \
+ ((PTHREAD_KEYS_MAX + PTHREAD_KEY_2NDLEVEL_SIZE - 1) \
+ / PTHREAD_KEY_2NDLEVEL_SIZE)
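
The two macros above fix the shape of the sparse two-level array: a key number splits into a top-level slot and an offset inside a PTHREAD_KEY_2NDLEVEL_SIZE-entry second-level block. A minimal sketch of that lookup, assuming only the macros defined above (the function name is illustrative, not part of this header):

/* Illustrative sketch only: locate the value stored for KEY in a
   two-level array laid out as described above.  LEVEL1 stands in for a
   descriptor's table of second-level blocks. */
static void *example_key_lookup(void **level1[], unsigned int key)
{
  unsigned int idx1st = key / PTHREAD_KEY_2NDLEVEL_SIZE;
  unsigned int idx2nd = key % PTHREAD_KEY_2NDLEVEL_SIZE;
  void **level2 = level1[idx1st];

  /* A NULL second-level block means nothing was ever stored under any
     key in that block; that is what makes the array sparse. */
  return level2 != NULL ? level2[idx2nd] : NULL;
}
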
typedef void (*destr_function)(void *);
@@ -72,9 +84,105 @@ struct pthread_key_struct {
};
-#define PTHREAD_START_ARGS_INITIALIZER(fct) \
- { (void *(*) (void *)) fct, NULL, {{0, }}, 0, { 0 } }
+#define PTHREAD_START_ARGS_INITIALIZER { NULL, NULL, {{0, }}, 0, { 0 } }
+
+/* The type of thread descriptors */
+
+typedef struct _pthread_descr_struct * pthread_descr;
+
+/* Callback interface for removing the thread from waiting on an
+ object if it is cancelled while waiting or about to wait.
+ This holds a pointer to the object, and a pointer to a function
+ which ``extricates'' the thread from its enqueued state.
+ The function takes two arguments: pointer to the wait object,
+ and a pointer to the thread. It returns 1 if an extrication
+ actually occurred, and hence the thread must also be signalled.
+ It returns 0 if the thread had already been extricated. */
+
+typedef struct _pthread_extricate_struct {
+ void *pu_object;
+ int (*pu_extricate_func)(void *, pthread_descr);
+} pthread_extricate_if;
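
As a concrete illustration of the protocol described above, a waiting primitive could register a callback such as the following before enqueueing itself. Everything named example_* is invented for this sketch and is not part of LinuxThreads:

/* Illustrative sketch only: a trivial single-waiter object and its
   extricate function. */
struct example_wait_object {
  pthread_descr waiter;              /* the enqueued thread, or NULL */
};

static int example_extricate_func(void *obj, pthread_descr th)
{
  struct example_wait_object *w = (struct example_wait_object *) obj;
  if (w->waiter == th) {
    w->waiter = NULL;                /* we dequeued the thread ourselves... */
    return 1;                        /* ...so the caller must also signal it */
  }
  return 0;                          /* thread was already extricated */
}

A pthread_extricate_if would then be filled in as { &wait_object, example_extricate_func } and attached to the thread for the duration of the wait.
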
+
+/* Atomic counter made possible by compare_and_swap */
+
+struct pthread_atomic {
+ long p_count;
+ int p_spinlock;
+};
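
The p_count/p_spinlock pair is intended to be driven by a compare-and-swap primitive, with the spinlock field backing the emulation on architectures that lack native CAS. A minimal sketch of an atomic increment, assuming a compare_and_swap() helper with the (ptr, oldval, newval, spinlock) shape LinuxThreads builds on top of pt-machine.h:

/* Illustrative sketch only: atomically add one to the counter and
   return its previous value. */
static long example_atomic_increment(struct pthread_atomic *v)
{
  long oldval;

  do
    oldval = v->p_count;
  while (!compare_and_swap(&v->p_count, oldval, oldval + 1, &v->p_spinlock));

  return oldval;
}
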
+/* Context info for read write locks. The pthread_rwlock_info structure
+ is information about a lock that has been read-locked by the thread
+ in whose list this structure appears. The pthread_rwlock_context
+ is embedded in the thread context and contains a pointer to the
+ head of the list of lock info structures, as well as a count of
+ read locks that are untracked, because no info structure could be
+ allocated for them. */
+
+struct _pthread_rwlock_t;
+
+typedef struct _pthread_rwlock_info {
+ struct _pthread_rwlock_info *pr_next;
+ struct _pthread_rwlock_t *pr_lock;
+ int pr_lock_count;
+} pthread_readlock_info;
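
The bookkeeping described above is just a singly linked list of these records hanging off each thread; the lookup the rwlock code needs before re-locking or unlocking can be sketched as follows (the function name is illustrative):

/* Illustrative sketch only: walk a thread's read-lock info list and
   return the entry tracking LOCK, or NULL if this read lock is only
   counted among the untracked ones. */
static pthread_readlock_info *
example_find_readlock(pthread_readlock_info *list,
                      struct _pthread_rwlock_t *lock)
{
  for (; list != NULL; list = list->pr_next)
    if (list->pr_lock == lock)
      return list;
  return NULL;
}
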
+
+struct _pthread_descr_struct {
+ pthread_descr p_nextlive, p_prevlive;
+ /* Double chaining of active threads */
+ pthread_descr p_nextwaiting; /* Next element in the queue holding the thr */
+ pthread_descr p_nextlock; /* can be on a queue and waiting on a lock */
+ pthread_t p_tid; /* Thread identifier */
+ int p_pid; /* PID of Unix process */
+ int p_priority; /* Thread priority (== 0 if not realtime) */
+ struct _pthread_fastlock * p_lock; /* Spinlock for synchronized accesses */
+ int p_signal; /* last signal received */
+ sigjmp_buf * p_signal_jmp; /* where to siglongjmp on a signal or NULL */
+ sigjmp_buf * p_cancel_jmp; /* where to siglongjmp on a cancel or NULL */
+ char p_terminated; /* true if terminated e.g. by pthread_exit */
+ char p_detached; /* true if detached */
+ char p_exited; /* true if the assoc. process terminated */
+ void * p_retval; /* placeholder for return value */
+ int p_retcode; /* placeholder for return code */
+ pthread_descr p_joining; /* thread joining on that thread or NULL */
+ struct _pthread_cleanup_buffer * p_cleanup; /* cleanup functions */
+ char p_cancelstate; /* cancellation state */
+ char p_canceltype; /* cancellation type (deferred/async) */
+ char p_canceled; /* cancellation request pending */
+ int * p_errnop; /* pointer to used errno variable */
+ int p_errno; /* error returned by last system call */
+ int * p_h_errnop; /* pointer to used h_errno variable */
+ int p_h_errno; /* error returned by last netdb function */
+ char * p_in_sighandler; /* stack address of sighandler, or NULL */
+ char p_sigwaiting; /* true if a sigwait() is in progress */
+ struct pthread_start_args p_start_args; /* arguments for thread creation */
+ void ** p_specific[PTHREAD_KEY_1STLEVEL_SIZE]; /* thread-specific data */
+ void * p_libc_specific[_LIBC_TSD_KEY_N]; /* thread-specific data for libc */
+ int p_userstack; /* nonzero if the user provided the stack */
+ void *p_guardaddr; /* address of guard area or NULL */
+ size_t p_guardsize; /* size of guard area */
+ pthread_descr p_self; /* Pointer to this structure */
+ int p_nr; /* Index of descriptor in __pthread_handles */
+ int p_report_events; /* Nonzero if events must be reported. */
+ td_eventbuf_t p_eventbuf; /* Data for event. */
+ struct pthread_atomic p_resume_count; /* number of times restart() was
+ called on thread */
+ char p_woken_by_cancel; /* cancellation performed wakeup */
+ char p_condvar_avail; /* flag if conditional variable became avail */
+ char p_sem_avail; /* flag if semaphore became available */
+ pthread_extricate_if *p_extricate; /* See above */
+ pthread_readlock_info *p_readlock_list; /* List of readlock info structs */
+ pthread_readlock_info *p_readlock_free; /* Free list of structs */
+ int p_untracked_readlock_count; /* Readlocks not tracked by list */
+ /* New elements must be added at the end. */
+#ifdef __UCLIBC_HAS_XLOCALE__
+ __locale_t locale; /* thread-specific locale from uselocale() only! */
+#endif /* __UCLIBC_HAS_XLOCALE__ */
+} __attribute__ ((aligned(32))); /* We need to align the structure so that
+ doubles are aligned properly. This is 8
+ bytes on MIPS and 16 bytes on MIPS64.
+ 32 bytes might give better cache
+ utilization. */
/* The type of thread handles. */
@@ -92,7 +200,7 @@ struct pthread_request {
pthread_descr req_thread; /* Thread doing the request */
enum { /* Request kind */
REQ_CREATE, REQ_FREE, REQ_PROCESS_EXIT, REQ_MAIN_THREAD_EXIT,
- REQ_POST, REQ_DEBUG, REQ_KICK, REQ_FOR_EACH_THREAD
+ REQ_POST, REQ_DEBUG, REQ_KICK
} req_kind;
union { /* Arguments for request */
struct { /* For REQ_CREATE: */
@@ -108,24 +216,10 @@ struct pthread_request {
int code; /* exit status */
} exit;
void * post; /* For REQ_POST: the semaphore */
- struct { /* For REQ_FOR_EACH_THREAD: callback */
- void (*fn)(void *, pthread_descr);
- void *arg;
- } for_each;
} req_args;
};
-
-typedef void (*arch_sighandler_t) (int, SIGCONTEXT);
-union sighandler
-{
- arch_sighandler_t old;
- void (*rt) (int, struct siginfo *, struct ucontext *);
-};
-extern union sighandler __sighandler[NSIG];
-
-
/* Signals used for suspend/restart and for cancellation notification. */
extern int __pthread_sig_restart;
@@ -141,10 +235,44 @@ extern int __pthread_sig_debug;
extern struct pthread_handle_struct __pthread_handles[PTHREAD_THREADS_MAX];
+/* Descriptor of the initial thread */
+
+extern struct _pthread_descr_struct __pthread_initial_thread;
+
+/* Descriptor of the manager thread */
+
+extern struct _pthread_descr_struct __pthread_manager_thread;
+
/* Descriptor of the main thread */
extern pthread_descr __pthread_main_thread;
+/* Limit between the stack of the initial thread (above) and the
+ stacks of other threads (below). Aligned on a STACK_SIZE boundary.
+ Initially 0, meaning that the current thread is (by definition)
+ the initial thread. */
+
+/* For non-MMU systems, also remember the stack top of the initial thread.
+ * This is adapted when other stacks are malloc'ed, since we don't know
+ * the bounds a priori. -StS */
+
+extern char *__pthread_initial_thread_bos;
+#ifndef __ARCH_HAS_MMU__
+extern char *__pthread_initial_thread_tos;
+#define NOMMU_INITIAL_THREAD_BOUNDS(tos,bos) \
+ if ((tos)>=__pthread_initial_thread_bos \
+ && (bos)<__pthread_initial_thread_tos) \
+ __pthread_initial_thread_bos = (tos)+1
+#else
+#define NOMMU_INITIAL_THREAD_BOUNDS(tos,bos) /* empty */
+#endif /* __ARCH_HAS_MMU__ */
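
On no-MMU targets the macro above is meant to be invoked each time a new thread stack is carved out, so that the initial thread's presumed stack region shrinks away from it. A hedged usage sketch; the allocation call and helper name are placeholders, not the real manager code:

/* Illustrative sketch only: after allocating a new thread stack,
   narrow the initial thread's presumed stack bounds so the two regions
   cannot overlap.  malloc() stands in for whatever allocator the
   manager really uses. */
static char *example_allocate_stack(size_t size)
{
  char *new_bos = malloc(size);          /* lowest address of the new stack */
  char *new_tos = new_bos + size - 1;    /* highest address of the new stack */

  if (new_bos != NULL)
    NOMMU_INITIAL_THREAD_BOUNDS(new_tos, new_bos);
  return new_bos;
}
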
+
+
+/* Indicate whether at least one thread has a user-defined stack (if 1),
+ or all threads have stacks supplied by LinuxThreads (if 0). */
+
+extern int __pthread_nonstandard_stacks;
+
/* File descriptor for sending requests to the thread manager.
Initially -1, meaning that __pthread_initialize_manager must be called. */
@@ -154,10 +282,10 @@ extern int __pthread_manager_request;
extern int __pthread_manager_reader;
-#ifdef FLOATING_STACKS
-/* Maximum stack size. */
-extern size_t __pthread_max_stacksize;
-#endif
+/* Limits of the thread manager stack. */
+
+extern char *__pthread_manager_thread_bos;
+extern char *__pthread_manager_thread_tos;
/* Pending request for a process-wide exit */
@@ -173,9 +301,6 @@ extern volatile td_thr_events_t __pthread_threads_events;
/* Pointer to descriptor of thread with last event. */
extern volatile pthread_descr __pthread_last_event;
-/* Flag which tells whether we are executing on SMP kernel. */
-extern int __pthread_smp_kernel;
-
/* Return the handle corresponding to a thread id */
static inline pthread_handle thread_handle(pthread_t id)
@@ -187,27 +312,28 @@ static inline pthread_handle thread_handle(pthread_t id)
static inline int invalid_handle(pthread_handle h, pthread_t id)
{
- return h->h_descr == NULL || h->h_descr->p_tid != id || h->h_descr->p_terminated;
-}
-
-static inline int nonexisting_handle(pthread_handle h, pthread_t id)
-{
return h->h_descr == NULL || h->h_descr->p_tid != id;
}
/* Fill in defaults left unspecified by pt-machine.h. */
-/* We round up a value with page size. */
-#ifndef page_roundup
-#define page_roundup(v,p) ((((size_t) (v)) + (p) - 1) & ~((p) - 1))
-#endif
-
/* The page size we can get from the system. This should likely not be
changed by the machine file but, you never know. */
extern size_t __pagesize;
#include <bits/uClibc_page.h>
#ifndef PAGE_SIZE
-#define PAGE_SIZE (sysconf (_SC_PAGE_SIZE))
+#define PAGE_SIZE (sysconf (_SC_PAGESIZE))
+#endif
+
+/* The max size of the thread stack segments. If the default
+ THREAD_SELF implementation is used, this must be a power of two and
+ a multiple of PAGE_SIZE. */
+#ifndef STACK_SIZE
+#ifdef __ARCH_HAS_MMU__
+#define STACK_SIZE (2 * 1024 * 1024)
+#else
+#define STACK_SIZE (4 * __pagesize)
+#endif
#endif
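
The power-of-two requirement exists because the default thread_self() further down recovers the descriptor purely from the stack pointer: thread stacks sit on STACK_SIZE-aligned boundaries with the descriptor at the very top of each segment. A worked example of the rounding, assuming the 2 MB MMU default:

/* Illustrative arithmetic only, with STACK_SIZE = 0x200000 (2 MB):

     sp                           = 0x40234567   (somewhere in the stack)
     sp | (STACK_SIZE - 1)        = 0x403fffff   (last byte of the segment)
     (sp | (STACK_SIZE - 1)) + 1  = 0x40400000   (top of the segment)

   Casting the segment top to pthread_descr and subtracting one then
   yields the descriptor stored at the high end of the stack, which is
   exactly the computation the default thread_self() performs. */
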
/* The initial size of the thread stack. Must be a multiple of PAGE_SIZE. */
@@ -228,12 +354,17 @@ extern size_t __pagesize;
#define THREAD_STACK_START_ADDRESS __pthread_initial_thread_bos
#endif
+/* Get some notion of the current stack. Need not be exactly the top
+ of the stack, just something somewhere in the current frame. */
+#ifndef CURRENT_STACK_FRAME
+#define CURRENT_STACK_FRAME ({ char __csf; &__csf; })
+#endif
+
/* If MEMORY_BARRIER isn't defined in pt-machine.h, assume the
architecture doesn't need a memory barrier instruction (e.g. Intel
x86). Still we need the compiler to respect the barrier and emit
all outstanding operations which modify memory. Some architectures
distinguish between full, read and write barriers. */
-
#ifndef MEMORY_BARRIER
#define MEMORY_BARRIER() asm ("" : : : "memory")
#endif
@@ -244,6 +375,54 @@ extern size_t __pagesize;
#define WRITE_MEMORY_BARRIER() MEMORY_BARRIER()
#endif
+/* Recover thread descriptor for the current thread */
+
+extern pthread_descr __pthread_find_self (void) __attribute__ ((const));
+
+static inline pthread_descr thread_self (void) __attribute__ ((const));
+static inline pthread_descr thread_self (void)
+{
+#ifdef THREAD_SELF
+ return THREAD_SELF;
+#else
+ char *sp = CURRENT_STACK_FRAME;
+#ifdef __ARCH_HAS_MMU__
+ if (sp >= __pthread_initial_thread_bos)
+ return &__pthread_initial_thread;
+ else if (sp >= __pthread_manager_thread_bos
+ && sp < __pthread_manager_thread_tos)
+ return &__pthread_manager_thread;
+ else if (__pthread_nonstandard_stacks)
+ return __pthread_find_self();
+ else
+ return (pthread_descr)(((unsigned long)sp | (STACK_SIZE-1))+1) - 1;
+#else
+ /* For non-MMU we need to be more careful about the initial thread stack.
+ * We refine the initial thread stack bounds dynamically as the other
+ * stacks are allocated, so that they don't overlap with it. Then we
+ * can be sure to pick the right thread according to the current SP. */
+
+ /* Since we allow other stack frames to be above or below, we need to
+ * treat this case specially. If pthread_initialize() hasn't been called yet,
+ * only the initial thread exists. */
+ if (__pthread_initial_thread_bos == NULL) {
+ return &__pthread_initial_thread;
+ }
+ else if (sp >= __pthread_initial_thread_bos
+ && sp < __pthread_initial_thread_tos) {
+ return &__pthread_initial_thread;
+ }
+ else if (sp >= __pthread_manager_thread_bos
+ && sp < __pthread_manager_thread_tos) {
+ return &__pthread_manager_thread;
+ }
+ else {
+ return __pthread_find_self();
+ }
+#endif /* __ARCH_HAS_MMU__ */
+#endif
+}
+
/* Max number of times we must spin on a spinlock calling sched_yield().
After MAX_SPIN_COUNT iterations, we put the calling thread to sleep. */
@@ -251,13 +430,6 @@ extern size_t __pagesize;
#define MAX_SPIN_COUNT 50
#endif
-/* Max number of times the spinlock in the adaptive mutex implementation
- spins actively on SMP systems. */
-
-#ifndef MAX_ADAPTIVE_SPIN_COUNT
-#define MAX_ADAPTIVE_SPIN_COUNT 100
-#endif
-
/* Duration of sleep (in nanoseconds) when we can't acquire a spinlock
after MAX_SPIN_COUNT iterations of sched_yield().
With the 2.0 and 2.1 kernels, this MUST BE > 2ms.
@@ -283,7 +455,6 @@ extern size_t __pagesize;
void __pthread_destroy_specifics(void);
void __pthread_perform_cleanup(void);
-void __pthread_init_max_stacksize (void);
int __pthread_initialize_manager(void);
void __pthread_message(char * fmt, ...);
int __pthread_manager(void *reqfd);