summaryrefslogtreecommitdiff
path: root/libpthread/nptl/descr.h
diff options
context:
space:
mode:
Diffstat (limited to 'libpthread/nptl/descr.h')
-rw-r--r--  libpthread/nptl/descr.h  139
1 files changed, 122 insertions, 17 deletions
diff --git a/libpthread/nptl/descr.h b/libpthread/nptl/descr.h
index 1a8d91be8..c355eae3c 100644
--- a/libpthread/nptl/descr.h
+++ b/libpthread/nptl/descr.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2006, 2007, 2008, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -36,6 +36,7 @@
#endif
#define __need_res_state
#include <resolv.h>
+#include <bits/kernel-features.h>
#ifndef TCB_ALIGNMENT
# define TCB_ALIGNMENT sizeof (double)
@@ -101,6 +102,23 @@ struct xid_command
};
+/* Data structure used by the kernel to find robust futexes. */
+struct robust_list_head
+{
+ void *list;
+ long int futex_offset;
+ void *list_op_pending;
+};
+
+
+/* Data structure used to handle thread priority protection. */
+struct priority_protection_data
+{
+ int priomax;
+ unsigned int priomap[];
+};
+
+
/* Thread descriptor data structure. */
struct pthread
{
@@ -113,6 +131,10 @@ struct pthread
struct
{
int multiple_threads;
+ int gscope_flag;
+# ifndef __ASSUME_PRIVATE_FUTEX
+ int private_futex;
+# endif
} header;
#endif
@@ -120,7 +142,7 @@ struct pthread
is private and subject to change without affecting the official ABI.
We just have it here in case it might be convenient for some
implementation-specific instrumentation hack or suchlike. */
- void *__padding[16];
+ void *__padding[24];
};
/* This descriptor's link on the `stack_used' or `__stack_user' list. */
@@ -133,6 +155,82 @@ struct pthread
/* Process ID - thread group ID in kernel speak. */
pid_t pid;
+ /* List of robust mutexes the thread is holding. */
+#ifdef __PTHREAD_MUTEX_HAVE_PREV
+ void *robust_prev;
+ struct robust_list_head robust_head;
+
+ /* The list above is strange. It is basically a double linked list
+ but the pointer to the next/previous element of the list points
+ in the middle of the object, the __next element. Whenever
+ casting to __pthread_list_t we need to adjust the pointer
+ first. */
+# define QUEUE_PTR_ADJUST (offsetof (__pthread_list_t, __next))
+
+# define ENQUEUE_MUTEX_BOTH(mutex, val) \
+ do { \
+ __pthread_list_t *next = (__pthread_list_t *) \
+ ((((uintptr_t) THREAD_GETMEM (THREAD_SELF, robust_head.list)) & ~1ul) \
+ - QUEUE_PTR_ADJUST); \
+ next->__prev = (void *) &mutex->__data.__list.__next; \
+ mutex->__data.__list.__next = THREAD_GETMEM (THREAD_SELF, \
+ robust_head.list); \
+ mutex->__data.__list.__prev = (void *) &THREAD_SELF->robust_head; \
+ THREAD_SETMEM (THREAD_SELF, robust_head.list, \
+ (void *) (((uintptr_t) &mutex->__data.__list.__next) \
+ | val)); \
+ } while (0)
+# define DEQUEUE_MUTEX(mutex) \
+ do { \
+ __pthread_list_t *next = (__pthread_list_t *) \
+ ((char *) (((uintptr_t) mutex->__data.__list.__next) & ~1ul) \
+ - QUEUE_PTR_ADJUST); \
+ next->__prev = mutex->__data.__list.__prev; \
+ __pthread_list_t *prev = (__pthread_list_t *) \
+ ((char *) (((uintptr_t) mutex->__data.__list.__prev) & ~1ul) \
+ - QUEUE_PTR_ADJUST); \
+ prev->__next = mutex->__data.__list.__next; \
+ mutex->__data.__list.__prev = NULL; \
+ mutex->__data.__list.__next = NULL; \
+ } while (0)
+#else
+ union
+ {
+ __pthread_slist_t robust_list;
+ struct robust_list_head robust_head;
+ };
+
+# define ENQUEUE_MUTEX_BOTH(mutex, val) \
+ do { \
+ mutex->__data.__list.__next \
+ = THREAD_GETMEM (THREAD_SELF, robust_list.__next); \
+ THREAD_SETMEM (THREAD_SELF, robust_list.__next, \
+ (void *) (((uintptr_t) &mutex->__data.__list) | val)); \
+ } while (0)
+# define DEQUEUE_MUTEX(mutex) \
+ do { \
+ __pthread_slist_t *runp = (__pthread_slist_t *) \
+ (((uintptr_t) THREAD_GETMEM (THREAD_SELF, robust_list.__next)) & ~1ul); \
+ if (runp == &mutex->__data.__list) \
+ THREAD_SETMEM (THREAD_SELF, robust_list.__next, runp->__next); \
+ else \
+ { \
+ __pthread_slist_t *next = (__pthread_slist_t *) \
+ (((uintptr_t) runp->__next) & ~1ul); \
+ while (next != &mutex->__data.__list) \
+ { \
+ runp = next; \
+ next = (__pthread_slist_t *) (((uintptr_t) runp->__next) & ~1ul); \
+ } \
+ \
+ runp->__next = next->__next; \
+ mutex->__data.__list.__next = NULL; \
+ } \
+ } while (0)
+#endif
+#define ENQUEUE_MUTEX(mutex) ENQUEUE_MUTEX_BOTH (mutex, 0)
+#define ENQUEUE_MUTEX_PI(mutex) ENQUEUE_MUTEX_BOTH (mutex, 1)
+
/* List of cleanup buffers. */
struct _pthread_cleanup_buffer *cleanup;
@@ -144,25 +242,25 @@ struct pthread
int cancelhandling;
/* Bit set if cancellation is disabled. */
#define CANCELSTATE_BIT 0
-#define CANCELSTATE_BITMASK 0x01
+#define CANCELSTATE_BITMASK (0x01 << CANCELSTATE_BIT)
/* Bit set if asynchronous cancellation mode is selected. */
#define CANCELTYPE_BIT 1
-#define CANCELTYPE_BITMASK 0x02
+#define CANCELTYPE_BITMASK (0x01 << CANCELTYPE_BIT)
/* Bit set if canceling has been initiated. */
#define CANCELING_BIT 2
-#define CANCELING_BITMASK 0x04
+#define CANCELING_BITMASK (0x01 << CANCELING_BIT)
/* Bit set if canceled. */
#define CANCELED_BIT 3
-#define CANCELED_BITMASK 0x08
+#define CANCELED_BITMASK (0x01 << CANCELED_BIT)
/* Bit set if thread is exiting. */
#define EXITING_BIT 4
-#define EXITING_BITMASK 0x10
+#define EXITING_BITMASK (0x01 << EXITING_BIT)
/* Bit set if thread terminated and TCB is freed. */
#define TERMINATED_BIT 5
-#define TERMINATED_BITMASK 0x20
+#define TERMINATED_BITMASK (0x01 << TERMINATED_BIT)
/* Bit set if thread is supposed to change XID. */
#define SETXID_BIT 6
-#define SETXID_BITMASK 0x40
+#define SETXID_BITMASK (0x01 << SETXID_BIT)
/* Mask for the rest. Helps the compiler to optimize. */
#define CANCEL_RESTMASK 0xffffff80
@@ -174,6 +272,9 @@ struct pthread
| EXITING_BITMASK | CANCEL_RESTMASK | TERMINATED_BITMASK)) \
== (CANCELTYPE_BITMASK | CANCELED_BITMASK))
+ /* Flags. Including those copied from the thread attribute. */
+ int flags;
+
/* We allocate one block of references here. This should be enough
to avoid allocating any memory dynamically for most applications. */
struct pthread_key_data
@@ -187,12 +288,12 @@ struct pthread
void *data;
} specific_1stblock[PTHREAD_KEY_2NDLEVEL_SIZE];
- /* Flag which is set when specific data is set. */
- bool specific_used;
-
/* Two-level array for the thread-specific data. */
struct pthread_key_data *specific[PTHREAD_KEY_1STLEVEL_SIZE];
+ /* Flag which is set when specific data is set. */
+ bool specific_used;
+
/* True if events must be reported. */
bool report_events;
@@ -202,11 +303,15 @@ struct pthread
/* True if thread must stop at startup time. */
bool stopped_start;
+ /* The parent's cancel handling at the time of the pthread_create
+ call. This might be needed to undo the effects of a cancellation. */
+ int parent_cancelhandling;
+
/* Lock to synchronize access to the descriptor. */
- lll_lock_t lock;
+ int lock;
/* Lock for synchronizing setxid calls. */
- lll_lock_t setxid_futex;
+ int setxid_futex;
#if HP_TIMING_AVAIL
/* Offset of the CPU clock at start thread start time. */
@@ -223,9 +328,6 @@ struct pthread
/* Check whether a thread is detached. */
#define IS_DETACHED(pd) ((pd)->joinid == (pd))
- /* Flags. Including those copied from the thread attribute. */
- int flags;
-
/* The result of the thread function. */
void *result;
@@ -257,6 +359,9 @@ struct pthread
/* This is what the user specified and what we will report. */
size_t reported_guardsize;
+ /* Thread Priority Protection data. */
+ struct priority_protection_data *tpp;
+
/* Resolver state. */
struct __res_state res;