Diffstat (limited to 'libpthread/linuxthreads/semaphore.c')
-rw-r--r--  libpthread/linuxthreads/semaphore.c  159
1 file changed, 128 insertions, 31 deletions
diff --git a/libpthread/linuxthreads/semaphore.c b/libpthread/linuxthreads/semaphore.c
index 0297b3a1e..0038ddd2a 100644
--- a/libpthread/linuxthreads/semaphore.c
+++ b/libpthread/linuxthreads/semaphore.c
@@ -14,6 +14,8 @@
 /* Semaphores a la POSIX 1003.1b */
 
+#include <features.h>
+#define __USE_GNU
 #include <errno.h>
 #include "pthread.h"
 #include "semaphore.h"
@@ -32,7 +34,7 @@ int __new_sem_init(sem_t *sem, int pshared, unsigned int value)
     errno = ENOSYS;
     return -1;
   }
-  __pthread_init_lock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_init_lock(&sem->__sem_lock);
   sem->__sem_value = value;
   sem->__sem_waiting = NULL;
   return 0;
@@ -47,9 +49,9 @@ static int new_sem_extricate_func(void *obj, pthread_descr th)
   sem_t *sem = obj;
   int did_remove = 0;
 
-  __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+  __pthread_lock(&sem->__sem_lock, self);
   did_remove = remove_from_queue(&sem->__sem_waiting, th);
-  __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_unlock(&sem->__sem_lock);
 
   return did_remove;
 }
@@ -59,35 +61,50 @@ int __new_sem_wait(sem_t * sem)
   volatile pthread_descr self = thread_self();
   pthread_extricate_if extr;
   int already_canceled = 0;
+  int spurious_wakeup_count;
 
   /* Set up extrication interface */
   extr.pu_object = sem;
   extr.pu_extricate_func = new_sem_extricate_func;
 
-  __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+  __pthread_lock(&sem->__sem_lock, self);
   if (sem->__sem_value > 0) {
     sem->__sem_value--;
-    __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+    __pthread_unlock(&sem->__sem_lock);
     return 0;
   }
   /* Register extrication interface */
-  __pthread_set_own_extricate_if(self, &extr);
+  THREAD_SETMEM(self, p_sem_avail, 0);
+  __pthread_set_own_extricate_if(self, &extr);
   /* Enqueue only if not already cancelled. */
   if (!(THREAD_GETMEM(self, p_canceled)
       && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
     enqueue(&sem->__sem_waiting, self);
   else
     already_canceled = 1;
-  __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_unlock(&sem->__sem_lock);
 
   if (already_canceled) {
-    __pthread_set_own_extricate_if(self, 0);
+    __pthread_set_own_extricate_if(self, 0);
     pthread_exit(PTHREAD_CANCELED);
-  }
+  }
 
   /* Wait for sem_post or cancellation, or fall through if already canceled */
-  suspend(self);
-  __pthread_set_own_extricate_if(self, 0);
+  spurious_wakeup_count = 0;
+  while (1)
+    {
+      suspend(self);
+      if (THREAD_GETMEM(self, p_sem_avail) == 0
+          && (THREAD_GETMEM(self, p_woken_by_cancel) == 0
+              || THREAD_GETMEM(self, p_cancelstate) != PTHREAD_CANCEL_ENABLE))
+        {
+          /* Count resumes that don't belong to us. */
+          spurious_wakeup_count++;
+          continue;
+        }
+      break;
+    }
+  __pthread_set_own_extricate_if(self, 0);
 
   /* Terminate only if the wakeup came from cancellation. */
   /* Otherwise ignore cancellation because we got the semaphore. */
@@ -105,7 +122,7 @@ int __new_sem_trywait(sem_t * sem)
 {
   int retval;
 
-  __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, NULL);
+  __pthread_lock(&sem->__sem_lock, NULL);
   if (sem->__sem_value == 0) {
     errno = EAGAIN;
     retval = -1;
@@ -113,7 +130,7 @@ int __new_sem_trywait(sem_t * sem)
     sem->__sem_value--;
     retval = 0;
   }
-  __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_unlock(&sem->__sem_lock);
   return retval;
 }
 
@@ -124,19 +141,21 @@ int __new_sem_post(sem_t * sem)
   struct pthread_request request;
 
   if (THREAD_GETMEM(self, p_in_sighandler) == NULL) {
-    __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+    __pthread_lock(&sem->__sem_lock, self);
     if (sem->__sem_waiting == NULL) {
       if (sem->__sem_value >= SEM_VALUE_MAX) {
         /* Overflow */
         errno = ERANGE;
-        __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+        __pthread_unlock(&sem->__sem_lock);
         return -1;
       }
       sem->__sem_value++;
-      __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+      __pthread_unlock(&sem->__sem_lock);
     } else {
       th = dequeue(&sem->__sem_waiting);
-      __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+      __pthread_unlock(&sem->__sem_lock);
+      th->p_sem_avail = 1;
+      WRITE_MEMORY_BARRIER();
       restart(th);
     }
   } else {
@@ -150,8 +169,8 @@ int __new_sem_post(sem_t * sem)
     }
     request.req_kind = REQ_POST;
     request.req_args.post = sem;
-    __libc_write(__pthread_manager_request,
-                 (char *) &request, sizeof(request));
+    TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
+                 (char *) &request, sizeof(request)));
   }
   return 0;
 }
@@ -189,21 +208,99 @@ int sem_unlink(const char *name)
   return -1;
 }
 
-#if defined PIC && DO_VERSIONING
-default_symbol_version (__new_sem_init, sem_init, GLIBC_2.1);
-default_symbol_version (__new_sem_wait, sem_wait, GLIBC_2.1);
-default_symbol_version (__new_sem_trywait, sem_trywait, GLIBC_2.1);
-default_symbol_version (__new_sem_post, sem_post, GLIBC_2.1);
-default_symbol_version (__new_sem_getvalue, sem_getvalue, GLIBC_2.1);
-default_symbol_version (__new_sem_destroy, sem_destroy, GLIBC_2.1);
-#else
-# ifdef weak_alias
+int sem_timedwait(sem_t *sem, const struct timespec *abstime)
+{
+  pthread_descr self = thread_self();
+  pthread_extricate_if extr;
+  int already_canceled = 0;
+  int spurious_wakeup_count;
+
+  __pthread_lock(&sem->__sem_lock, self);
+  if (sem->__sem_value > 0) {
+    --sem->__sem_value;
+    __pthread_unlock(&sem->__sem_lock);
+    return 0;
+  }
+
+  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) {
+    /* The standard requires that if the function would block and the
+       time value is illegal, the function returns with an error. */
+    __pthread_unlock(&sem->__sem_lock);
+    return EINVAL;
+  }
+
+  /* Set up extrication interface */
+  extr.pu_object = sem;
+  extr.pu_extricate_func = new_sem_extricate_func;
+
+  /* Register extrication interface */
+  THREAD_SETMEM(self, p_sem_avail, 0);
+  __pthread_set_own_extricate_if(self, &extr);
+  /* Enqueue only if not already cancelled. */
+  if (!(THREAD_GETMEM(self, p_canceled)
+      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
+    enqueue(&sem->__sem_waiting, self);
+  else
+    already_canceled = 1;
+  __pthread_unlock(&sem->__sem_lock);
+
+  if (already_canceled) {
+    __pthread_set_own_extricate_if(self, 0);
+    pthread_exit(PTHREAD_CANCELED);
+  }
+
+  spurious_wakeup_count = 0;
+  while (1)
+    {
+      if (timedsuspend(self, abstime) == 0) {
+        int was_on_queue;
+
+        /* __pthread_lock will queue back any spurious restarts that
+           may happen to it. */
+
+        __pthread_lock(&sem->__sem_lock, self);
+        was_on_queue = remove_from_queue(&sem->__sem_waiting, self);
+        __pthread_unlock(&sem->__sem_lock);
+
+        if (was_on_queue) {
+          __pthread_set_own_extricate_if(self, 0);
+          return ETIMEDOUT;
+        }
+
+        /* Eat the outstanding restart() from the signaller */
+        suspend(self);
+      }
+
+      if (THREAD_GETMEM(self, p_sem_avail) == 0
+          && (THREAD_GETMEM(self, p_woken_by_cancel) == 0
+              || THREAD_GETMEM(self, p_cancelstate) != PTHREAD_CANCEL_ENABLE))
+        {
+          /* Count resumes that don't belong to us. */
+          spurious_wakeup_count++;
+          continue;
+        }
+      break;
+    }
+
+  __pthread_set_own_extricate_if(self, 0);
+
+  /* Terminate only if the wakeup came from cancellation. */
+  /* Otherwise ignore cancellation because we got the semaphore. */
+
+  if (THREAD_GETMEM(self, p_woken_by_cancel)
+      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
+    THREAD_SETMEM(self, p_woken_by_cancel, 0);
+    pthread_exit(PTHREAD_CANCELED);
+  }
+  /* We got the semaphore */
+  return 0;
+}
+
 weak_alias (__new_sem_init, sem_init)
 weak_alias (__new_sem_wait, sem_wait)
 weak_alias (__new_sem_trywait, sem_trywait)
 weak_alias (__new_sem_post, sem_post)
 weak_alias (__new_sem_getvalue, sem_getvalue)
 weak_alias (__new_sem_destroy, sem_destroy)
-# endif
-#endif
-
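For reference, a minimal usage sketch (not part of the commit) exercising the sem_timedwait() added above; it assumes a POSIX system and linking with -lpthread. Note that the implementation in this diff returns the error number (EINVAL, ETIMEDOUT) directly, whereas later implementations return -1 and set errno, so the check below tolerates both conventions.

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
    sem_t sem;
    struct timespec abstime;
    int rc;

    sem_init(&sem, 0, 0);                   /* process-private, initial value 0 */

    clock_gettime(CLOCK_REALTIME, &abstime);
    abstime.tv_sec += 1;                    /* give up about one second from now */

    /* Nothing posts the semaphore, so the wait should time out. */
    rc = sem_timedwait(&sem, &abstime);
    if (rc != 0) {
        int err = (rc == -1) ? errno : rc;  /* tolerate both return conventions */
        if (err == ETIMEDOUT)
            puts("sem_timedwait timed out as expected");
    }

    sem_destroy(&sem);
    return 0;
}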