Diffstat (limited to 'libpthread/nptl/pthread_mutex_lock.c')
-rw-r--r--  libpthread/nptl/pthread_mutex_lock.c  436
1 file changed, 406 insertions(+), 30 deletions(-)
diff --git a/libpthread/nptl/pthread_mutex_lock.c b/libpthread/nptl/pthread_mutex_lock.c
index faa5391dd..47e4d4158 100644
--- a/libpthread/nptl/pthread_mutex_lock.c
+++ b/libpthread/nptl/pthread_mutex_lock.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2007, 2008, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19,27 +19,52 @@
#include <assert.h>
#include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <not-cancel.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#ifndef LLL_MUTEX_LOCK
-# define LLL_MUTEX_LOCK(mutex) lll_mutex_lock (mutex)
-# define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_trylock (mutex)
+# define LLL_MUTEX_LOCK(mutex) \
+ lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
+# define LLL_MUTEX_TRYLOCK(mutex) \
+ lll_trylock ((mutex)->__data.__lock)
+# define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
+ lll_robust_lock ((mutex)->__data.__lock, id, \
+ PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
#endif
+static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
+ __attribute_noinline__;
+
+
int
-__pthread_mutex_lock (pthread_mutex_t *mutex)
+__pthread_mutex_lock (
+ pthread_mutex_t *mutex)
{
assert (sizeof (mutex->__size) >= sizeof (mutex->__data));
+ unsigned int type = PTHREAD_MUTEX_TYPE (mutex);
+ if (__builtin_expect (type & ~PTHREAD_MUTEX_KIND_MASK_NP, 0))
+ return __pthread_mutex_lock_full (mutex);
+
pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
- switch (__builtin_expect (mutex->__data.__kind, PTHREAD_MUTEX_TIMED_NP))
+ if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
+ == PTHREAD_MUTEX_TIMED_NP)
+ {
+ simple:
+ /* Normal mutex. */
+ LLL_MUTEX_LOCK (mutex);
+ assert (mutex->__data.__owner == 0);
+ }
+ else if (__builtin_expect (type == PTHREAD_MUTEX_RECURSIVE_NP, 1))
{
/* Recursive mutex. */
- case PTHREAD_MUTEX_RECURSIVE_NP:
+
/* Check whether we already hold the mutex. */
if (mutex->__data.__owner == id)
{
@@ -54,32 +79,17 @@ __pthread_mutex_lock (pthread_mutex_t *mutex)
}
/* We have to get the mutex. */
- LLL_MUTEX_LOCK (mutex->__data.__lock);
+ LLL_MUTEX_LOCK (mutex);
+ assert (mutex->__data.__owner == 0);
mutex->__data.__count = 1;
- break;
-
- /* Error checking mutex. */
- case PTHREAD_MUTEX_ERRORCHECK_NP:
- /* Check whether we already hold the mutex. */
- if (mutex->__data.__owner == id)
- return EDEADLK;
-
- /* FALLTHROUGH */
-
- default:
- /* Correct code cannot set any other type. */
- case PTHREAD_MUTEX_TIMED_NP:
- simple:
- /* Normal mutex. */
- LLL_MUTEX_LOCK (mutex->__data.__lock);
- break;
-
- case PTHREAD_MUTEX_ADAPTIVE_NP:
+ }
+ else if (__builtin_expect (type == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
+ {
if (! __is_smp)
goto simple;
- if (LLL_MUTEX_TRYLOCK (mutex->__data.__lock) != 0)
+ if (LLL_MUTEX_TRYLOCK (mutex) != 0)
{
int cnt = 0;
int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
@@ -88,7 +98,7 @@ __pthread_mutex_lock (pthread_mutex_t *mutex)
{
if (cnt++ >= max_cnt)
{
- LLL_MUTEX_LOCK (mutex->__data.__lock);
+ LLL_MUTEX_LOCK (mutex);
break;
}
@@ -96,15 +106,362 @@ __pthread_mutex_lock (pthread_mutex_t *mutex)
BUSY_WAIT_NOP;
#endif
}
- while (LLL_MUTEX_TRYLOCK (mutex->__data.__lock) != 0);
+ while (LLL_MUTEX_TRYLOCK (mutex) != 0);
mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
}
+ assert (mutex->__data.__owner == 0);
+ }
+ else
+ {
+ assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
+ /* Check whether we already hold the mutex. */
+ if (__builtin_expect (mutex->__data.__owner == id, 0))
+ return EDEADLK;
+ goto simple;
+ }
+
+ /* Record the ownership. */
+ mutex->__data.__owner = id;
+#ifndef NO_INCR
+ ++mutex->__data.__nusers;
+#endif
+
+ return 0;
+}
+
+static int
+__pthread_mutex_lock_full (pthread_mutex_t *mutex)
+{
+ int oldval;
+ pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+
+ switch (PTHREAD_MUTEX_TYPE (mutex))
+ {
+ case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
+ case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
+ case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
+ case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+ &mutex->__data.__list.__next);
+
+ oldval = mutex->__data.__lock;
+ do
+ {
+ again:
+ if ((oldval & FUTEX_OWNER_DIED) != 0)
+ {
+ /* The previous owner died. Try locking the mutex. */
+ int newval = id;
+#ifdef NO_INCR
+ newval |= FUTEX_WAITERS;
+#else
+ newval |= (oldval & FUTEX_WAITERS);
+#endif
+
+ newval
+ = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+ newval, oldval);
+
+ if (newval != oldval)
+ {
+ oldval = newval;
+ goto again;
+ }
+
+ /* We got the mutex. */
+ mutex->__data.__count = 1;
+ /* But it is inconsistent unless marked otherwise. */
+ mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
+
+ ENQUEUE_MUTEX (mutex);
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+ /* Note that we deliberately exit here. If we fall
+ through to the end of the function __nusers would be
+ incremented which is not correct because the old
+ owner has to be discounted. If we are not supposed
+ to increment __nusers we actually have to decrement
+ it here. */
+#ifdef NO_INCR
+ --mutex->__data.__nusers;
+#endif
+
+ return EOWNERDEAD;
+ }
+
+ /* Check whether we already hold the mutex. */
+ if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+ {
+ int kind = PTHREAD_MUTEX_TYPE (mutex);
+ if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
+ {
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+ NULL);
+ return EDEADLK;
+ }
+
+ if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
+ {
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+ NULL);
+
+ /* Just bump the counter. */
+ if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+ /* Overflow of the counter. */
+ return EAGAIN;
+
+ ++mutex->__data.__count;
+
+ return 0;
+ }
+ }
+
+ oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);
+
+ if (__builtin_expect (mutex->__data.__owner
+ == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
+ {
+ /* This mutex is now not recoverable. */
+ mutex->__data.__count = 0;
+ lll_unlock (mutex->__data.__lock,
+ PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+ return ENOTRECOVERABLE;
+ }
+ }
+ while ((oldval & FUTEX_OWNER_DIED) != 0);
+
+ mutex->__data.__count = 1;
+ ENQUEUE_MUTEX (mutex);
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+ break;
+
+ case PTHREAD_MUTEX_PI_RECURSIVE_NP:
+ case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
+ case PTHREAD_MUTEX_PI_NORMAL_NP:
+ case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
+ case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
+ case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
+ case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
+ case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
+ {
+ int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
+ int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+
+ if (robust)
+ /* Note: robust PI futexes are signaled by setting bit 0. */
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+ (void *) (((uintptr_t) &mutex->__data.__list.__next)
+ | 1));
+
+ oldval = mutex->__data.__lock;
+
+ /* Check whether we already hold the mutex. */
+ if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+ {
+ if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
+ {
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+ return EDEADLK;
+ }
+
+ if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
+ {
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+ /* Just bump the counter. */
+ if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+ /* Overflow of the counter. */
+ return EAGAIN;
+
+ ++mutex->__data.__count;
+
+ return 0;
+ }
+ }
+
+ int newval = id;
+#ifdef NO_INCR
+ newval |= FUTEX_WAITERS;
+#endif
+ oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+ newval, 0);
+
+ if (oldval != 0)
+ {
+ /* The mutex is locked. The kernel will now take care of
+ everything. */
+ int private = (robust
+ ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
+ : PTHREAD_MUTEX_PSHARED (mutex));
+ INTERNAL_SYSCALL_DECL (__err);
+ int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
+ __lll_private_flag (FUTEX_LOCK_PI,
+ private), 1, 0);
+
+ if (INTERNAL_SYSCALL_ERROR_P (e, __err)
+ && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
+ || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
+ {
+ assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
+ || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
+ && kind != PTHREAD_MUTEX_RECURSIVE_NP));
+ /* ESRCH can happen only for non-robust PI mutexes where
+ the owner of the lock died. */
+ assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);
+
+ /* Delay the thread indefinitely. */
+ while (1)
+ pause_not_cancel ();
+ }
+
+ oldval = mutex->__data.__lock;
+
+ assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
+ }
+
+ if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+ {
+ atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
+
+ /* We got the mutex. */
+ mutex->__data.__count = 1;
+ /* But it is inconsistent unless marked otherwise. */
+ mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
+
+ ENQUEUE_MUTEX_PI (mutex);
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+ /* Note that we deliberately exit here. If we fall
+ through to the end of the function __nusers would be
+ incremented which is not correct because the old owner
+ has to be discounted. If we are not supposed to
+ increment __nusers we actually have to decrement it here. */
+#ifdef NO_INCR
+ --mutex->__data.__nusers;
+#endif
+
+ return EOWNERDEAD;
+ }
+
+ if (robust
+ && __builtin_expect (mutex->__data.__owner
+ == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
+ {
+ /* This mutex is now not recoverable. */
+ mutex->__data.__count = 0;
+
+ INTERNAL_SYSCALL_DECL (__err);
+ INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
+ __lll_private_flag (FUTEX_UNLOCK_PI,
+ PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
+ 0, 0);
+
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+ return ENOTRECOVERABLE;
+ }
+
+ mutex->__data.__count = 1;
+ if (robust)
+ {
+ ENQUEUE_MUTEX_PI (mutex);
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+ }
+ }
break;
+
+ case PTHREAD_MUTEX_PP_RECURSIVE_NP:
+ case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
+ case PTHREAD_MUTEX_PP_NORMAL_NP:
+ case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
+ {
+ int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
+
+ oldval = mutex->__data.__lock;
+
+ /* Check whether we already hold the mutex. */
+ if (mutex->__data.__owner == id)
+ {
+ if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
+ return EDEADLK;
+
+ if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
+ {
+ /* Just bump the counter. */
+ if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+ /* Overflow of the counter. */
+ return EAGAIN;
+
+ ++mutex->__data.__count;
+
+ return 0;
+ }
+ }
+
+ int oldprio = -1, ceilval;
+ do
+ {
+ int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
+ >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
+
+ if (__pthread_current_priority () > ceiling)
+ {
+ if (oldprio != -1)
+ __pthread_tpp_change_priority (oldprio, -1);
+ return EINVAL;
+ }
+
+ int retval = __pthread_tpp_change_priority (oldprio, ceiling);
+ if (retval)
+ return retval;
+
+ ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
+ oldprio = ceiling;
+
+ oldval
+ = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+#ifdef NO_INCR
+ ceilval | 2,
+#else
+ ceilval | 1,
+#endif
+ ceilval);
+
+ if (oldval == ceilval)
+ break;
+
+ do
+ {
+ oldval
+ = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+ ceilval | 2,
+ ceilval | 1);
+
+ if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
+ break;
+
+ if (oldval != ceilval)
+ lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
+ PTHREAD_MUTEX_PSHARED (mutex));
+ }
+ while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+ ceilval | 2, ceilval)
+ != ceilval);
+ }
+ while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);
+
+ assert (mutex->__data.__owner == 0);
+ mutex->__data.__count = 1;
+ }
+ break;
+
+ default:
+ /* Correct code cannot set any other type. */
+ return EINVAL;
}
/* Record the ownership. */
- assert (mutex->__data.__owner == 0);
mutex->__data.__owner = id;
#ifndef NO_INCR
++mutex->__data.__nusers;
@@ -116,3 +473,22 @@ __pthread_mutex_lock (pthread_mutex_t *mutex)
strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
strong_alias (__pthread_mutex_lock, __pthread_mutex_lock_internal)
#endif
+
+
+#ifdef NO_INCR
+void
+__pthread_mutex_cond_lock_adjust (
+ pthread_mutex_t *mutex)
+{
+ assert ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
+ assert ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
+ assert ((mutex->__data.__kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);
+
+ /* Record the ownership. */
+ pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+ mutex->__data.__owner = id;
+
+ if (mutex->__data.__kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
+ ++mutex->__data.__count;
+}
+#endif
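

Not part of the patch: a minimal caller-side sketch of how the robust-mutex paths added above surface to applications. EOWNERDEAD corresponds to the FUTEX_OWNER_DIED branch, ENOTRECOVERABLE to the PTHREAD_MUTEX_NOTRECOVERABLE branch. The sketch assumes the POSIX.1-2008 names pthread_mutexattr_setrobust and pthread_mutex_consistent; NPTL builds of this vintage may only ship the _np-suffixed variants.

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t shared_lock;

/* Lock SHARED_LOCK, repairing it if the previous owner died while
   holding it.  Returns 0, or ENOTRECOVERABLE if a prior locker gave
   up without calling pthread_mutex_consistent.  */
static int
lock_and_recover (void)
{
  int err = pthread_mutex_lock (&shared_lock);
  if (err == EOWNERDEAD)
    {
      /* We now own the lock, but the data it protects is suspect
	 (the __owner == PTHREAD_MUTEX_INCONSISTENT state above).
	 Repair the shared state, then mark the mutex usable again.  */
      /* ... repair application data here ... */
      err = pthread_mutex_consistent (&shared_lock);
    }
  return err;
}

static int
setup (void)
{
  pthread_mutexattr_t a;
  int err = pthread_mutexattr_init (&a);
  if (err == 0)
    err = pthread_mutexattr_setrobust (&a, PTHREAD_MUTEX_ROBUST);
  if (err == 0)
    err = pthread_mutex_init (&shared_lock, &a);
  pthread_mutexattr_destroy (&a);
  return err;
}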
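
Likewise hypothetical, not from the patch: initializing mutexes that exercise the new PI and PP branches. PTHREAD_PRIO_INHERIT maps to the FUTEX_LOCK_PI path, PTHREAD_PRIO_PROTECT to the priority-ceiling loop built on __pthread_tpp_change_priority; the ceiling value 10 below is an arbitrary example, and PI needs kernel futex-PI support.

#include <pthread.h>

/* Create a mutex using PROTOCOL: PTHREAD_PRIO_INHERIT (the
   PTHREAD_MUTEX_PI_* types above) or PTHREAD_PRIO_PROTECT (the
   PTHREAD_MUTEX_PP_* types).  Returns 0 or an errno value.  */
static int
init_protocol_mutex (pthread_mutex_t *mu, int protocol)
{
  pthread_mutexattr_t a;
  int err = pthread_mutexattr_init (&a);
  if (err != 0)
    return err;
  err = pthread_mutexattr_setprotocol (&a, protocol);
  if (err == 0 && protocol == PTHREAD_PRIO_PROTECT)
    /* Lockers running above the ceiling get EINVAL, matching the
       __pthread_current_priority () check in the PP branch.  */
    err = pthread_mutexattr_setprioceiling (&a, 10);
  if (err == 0)
    err = pthread_mutex_init (mu, &a);
  pthread_mutexattr_destroy (&a);
  return err;
}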