Diffstat (limited to 'libpthread/linuxthreads/sysdeps/i386')
-rw-r--r--   libpthread/linuxthreads/sysdeps/i386/i686/pt-machine.h |  78
-rw-r--r--   libpthread/linuxthreads/sysdeps/i386/pspinlock.c        | 102
-rw-r--r--   libpthread/linuxthreads/sysdeps/i386/pt-machine.h       |  93
-rw-r--r--   libpthread/linuxthreads/sysdeps/i386/tcb-offsets.sym    |   7
-rw-r--r--   libpthread/linuxthreads/sysdeps/i386/tls.h              |  84
-rw-r--r--   libpthread/linuxthreads/sysdeps/i386/useldt.h           |  19
6 files changed, 295 insertions, 88 deletions
diff --git a/libpthread/linuxthreads/sysdeps/i386/i686/pt-machine.h b/libpthread/linuxthreads/sysdeps/i386/i686/pt-machine.h
new file mode 100644
index 000000000..2e52abe2e
--- /dev/null
+++ b/libpthread/linuxthreads/sysdeps/i386/i686/pt-machine.h
@@ -0,0 +1,78 @@
+/* Machine-dependent pthreads configuration and inline functions.
+   i686 version.
+   Copyright (C) 1996-2001, 2002, 2003 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Richard Henderson <rth@tamu.edu>.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#ifndef _PT_MACHINE_H
+#define _PT_MACHINE_H 1
+
+#ifndef PT_EI
+# define PT_EI __extern_always_inline
+#endif
+#include <bits/kernel-features.h>
+
+#ifndef __ASSEMBLER__
+extern long int testandset (int *spinlock);
+extern int __compare_and_swap (long int *p, long int oldval, long int newval);
+
+/* Get some notion of the current stack.  Need not be exactly the top
+   of the stack, just something somewhere in the current frame.  */
+#define CURRENT_STACK_FRAME  __builtin_frame_address (0)
+
+
+/* Spinlock implementation; required.  */
+PT_EI long int
+testandset (int *spinlock)
+{
+  long int ret;
+
+  __asm__ __volatile__ (
+       "xchgl %0, %1"
+       : "=r" (ret), "=m" (*spinlock)
+       : "0" (1), "m" (*spinlock)
+       : "memory");
+
+  return ret;
+}
+
+
+/* Compare-and-swap for semaphores.  It's always available on i686.  */
+#define HAS_COMPARE_AND_SWAP
+
+PT_EI int
+__compare_and_swap (long int *p, long int oldval, long int newval)
+{
+  char ret;
+  long int readval;
+
+  __asm__ __volatile__ ("lock; cmpxchgl %3, %1; sete %0"
+                        : "=q" (ret), "=m" (*p), "=a" (readval)
+                        : "r" (newval), "m" (*p), "a" (oldval)
+                        : "memory");
+  return ret;
+}
+#endif
+
+#if __ASSUME_LDT_WORKS > 0
+#include "../useldt.h"
+#endif
+
+/* The P4 and above really want some help to prevent overheating.  */
+#define BUSY_WAIT_NOP __asm__ ("rep; nop")
+
+#endif /* pt-machine.h */
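The new i686/pt-machine.h above supplies the two primitives linuxthreads needs: an xchgl-based testandset, a lock; cmpxchgl compare-and-swap, and the rep; nop busy-wait hint. The following standalone sketch (not part of the patch; the my_* names and the test program are invented for illustration, and it assumes a 32-bit x86 target built with GCC inline assembly) shows how those primitives combine into a minimal spinlock:

/* Minimal spinlock built from the same xchgl-based test-and-set the
   header provides.  Illustration only; names are hypothetical.  */
#include <stdio.h>

static int lock_word;                  /* 0 = unlocked, 1 = locked */

static long int
my_testandset (int *spinlock)
{
  long int ret;

  /* Atomically store 1 and return the previous value; xchg with a
     memory operand is implicitly locked on x86.  */
  __asm__ __volatile__ ("xchgl %0, %1"
                        : "=r" (ret), "=m" (*spinlock)
                        : "0" (1), "m" (*spinlock)
                        : "memory");
  return ret;
}

static void
my_spin_lock (int *spinlock)
{
  while (my_testandset (spinlock))
    __asm__ ("rep; nop");              /* BUSY_WAIT_NOP: the pause hint */
}

static void
my_spin_unlock (int *spinlock)
{
  __asm__ __volatile__ ("" : : : "memory");   /* compiler barrier */
  *spinlock = 0;                       /* plain store releases on x86 */
}

int
main (void)
{
  my_spin_lock (&lock_word);
  puts ("lock acquired");
  my_spin_unlock (&lock_word);
  return 0;
}

Because xchg is implicitly locked, testandset needs no lock prefix, and on x86 the release side needs only a compiler barrier and a plain store.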
diff --git a/libpthread/linuxthreads/sysdeps/i386/pspinlock.c b/libpthread/linuxthreads/sysdeps/i386/pspinlock.c
new file mode 100644
index 000000000..7936735f9
--- /dev/null
+++ b/libpthread/linuxthreads/sysdeps/i386/pspinlock.c
@@ -0,0 +1,102 @@
+/* POSIX spinlock implementation.  x86 version.
+   Copyright (C) 2000, 2002, 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#include <errno.h>
+#include <pthread.h>
+#include "internals.h"
+#include <bits/kernel-features.h>
+
+
+/* This implementation is similar to the one used in the Linux kernel.
+   But the kernel is byte instructions for the memory access.  This is
+   faster but unusable here.  The problem is that only 128
+   threads/processes could use the spinlock at the same time.  If (by
+   a design error in the program) a thread/process would hold the
+   spinlock for a time long enough to accumulate 128 waiting
+   processes, the next one will find a positive value in the spinlock
+   and assume it is unlocked.  We cannot accept that.  */
+
+int
+__pthread_spin_lock (pthread_spinlock_t *lock)
+{
+  __asm__ __volatile__
+    ("\n"
+     "1:\n\t"
+     "lock; decl %0\n\t"
+     "js 2f\n\t"
+     ".section .text.spinlock,\"ax\"\n"
+     "2:\n\t"
+     "cmpl $0,%0\n\t"
+     "rep; nop\n\t"
+     "jle 2b\n\t"
+     "jmp 1b\n\t"
+     ".previous"
+     : "=m" (*lock));
+  return 0;
+}
+weak_alias (__pthread_spin_lock, pthread_spin_lock)
+
+
+int
+__pthread_spin_trylock (pthread_spinlock_t *lock)
+{
+  int oldval;
+
+  __asm__ __volatile__
+    ("xchgl %0,%1"
+     : "=r" (oldval), "=m" (*lock)
+     : "0" (0));
+  return oldval > 0 ? 0 : EBUSY;
+}
+weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
+
+
+int
+__pthread_spin_unlock (pthread_spinlock_t *lock)
+{
+  __asm__ __volatile__
+    ("movl $1,%0"
+     : "=m" (*lock));
+  return 0;
+}
+weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
+
+
+int
+__pthread_spin_init (pthread_spinlock_t *lock, int pshared)
+{
+  /* We can ignore the `pshared' parameter.  Since we are busy-waiting
+     all processes which can access the memory location `lock' points
+     to can use the spinlock.  */
+  *lock = 1;
+  return 0;
+}
+weak_alias (__pthread_spin_init, pthread_spin_init)
+
+
+int
+__pthread_spin_destroy (pthread_spinlock_t *lock)
+{
+  /* Nothing to do.  */
+  return 0;
+}
+weak_alias (__pthread_spin_destroy, pthread_spin_destroy)
+
+#ifndef __ASSUME_SET_THREAD_AREA_SYSCALL
+int __have_no_set_thread_area;
+#endif
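The comment in pspinlock.c explains why the lock word is a full 32-bit counter rather than a byte: the word starts at 1, __pthread_spin_lock does a lock; decl and owns the lock only when the result is not negative, and unlock simply stores 1 again. A rough C11-atomics rendering of that protocol (not part of the patch; the toy_* names are invented, and the real code busy-waits with rep; nop rather than an empty loop) looks like this:

/* Sketch of the counting protocol, for illustration only.  */
#include <stdatomic.h>

typedef struct { atomic_int word; } toy_spinlock_t;

static void
toy_spin_init (toy_spinlock_t *l)
{
  atomic_store (&l->word, 1);          /* 1 means "unlocked" */
}

static void
toy_spin_lock (toy_spinlock_t *l)
{
  for (;;)
    {
      /* "lock; decl %0": we own the lock iff the old value was positive,
         i.e. the decrement did not produce a negative result ("js").  */
      if (atomic_fetch_sub (&l->word, 1) > 0)
        return;
      /* Contended: wait until the word looks unlocked, then retry.  */
      while (atomic_load (&l->word) <= 0)
        ;
    }
}

static void
toy_spin_unlock (toy_spinlock_t *l)
{
  atomic_store (&l->word, 1);          /* "movl $1,%0" */
}

int
main (void)
{
  toy_spinlock_t l;

  toy_spin_init (&l);
  toy_spin_lock (&l);
  /* ... critical section ... */
  toy_spin_unlock (&l);
  return 0;
}

Each failed attempt leaves the counter one lower, so with a one-byte lock word roughly 128 queued waiters would wrap the signed value back to positive and make the lock appear free, which is exactly the failure mode the comment rules out.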
diff --git a/libpthread/linuxthreads/sysdeps/i386/pt-machine.h b/libpthread/linuxthreads/sysdeps/i386/pt-machine.h
index 24c5e6c7c..82a5cf077 100644
--- a/libpthread/linuxthreads/sysdeps/i386/pt-machine.h
+++ b/libpthread/linuxthreads/sysdeps/i386/pt-machine.h
@@ -18,14 +18,17 @@
    License along with the GNU C Library; see the file COPYING.LIB.  If
    not, see <http://www.gnu.org/licenses/>.  */
 
+#if defined __pentiumpro__ || defined __pentium4__ || defined __athlon__ || \
+    defined __k8__
+# include "i686/pt-machine.h"
+#else
+
 #ifndef _PT_MACHINE_H
 #define _PT_MACHINE_H 1
 
-#include <features.h>
-
 #ifndef __ASSEMBLER__
 #ifndef PT_EI
-# define PT_EI __extern_always_inline __attribute__((visibility("hidden")))
+# define PT_EI __extern_always_inline
 #endif
 
 extern long int testandset (int *spinlock);
@@ -36,54 +39,6 @@ extern int __compare_and_swap (long int *p, long int oldval, long int newval);
 #define CURRENT_STACK_FRAME  __builtin_frame_address (0)
 
 
-/* See if we can optimize for newer cpus... */
-#if defined __GNUC__ && __GNUC__ >= 2 && \
-    (defined __i486__ || defined __pentium__ || defined __pentiumpro__ || defined __pentium4__ || \
-     defined __athlon__ || defined __k8__)
-
-/* Spinlock implementation; required.  */
-PT_EI long int
-testandset (int *spinlock)
-{
-  long int ret;
-
-  __asm__ __volatile__ (
-       "xchgl %0, %1"
-       : "=r" (ret), "=m" (*spinlock)
-       : "0" (1), "m" (*spinlock)
-       : "memory");
-
-  return ret;
-}
-
-/* Compare-and-swap for semaphores.  It's always available on i686.  */
-#define HAS_COMPARE_AND_SWAP
-
-PT_EI int
-__compare_and_swap (long int *p, long int oldval, long int newval)
-{
-  char ret;
-  long int readval;
-
-  __asm__ __volatile__ ("lock; cmpxchgl %3, %1; sete %0"
-                        : "=q" (ret), "=m" (*p), "=a" (readval)
-                        : "r" (newval), "m" (*p), "a" (oldval)
-                        : "memory");
-  return ret;
-}
-
-#if defined(__ASSUME_LDT_WORKS) && __ASSUME_LDT_WORKS > 0
-#include "useldt.h"
-#endif
-
-/* The P4 and above really want some help to prevent overheating.  */
-#define BUSY_WAIT_NOP __asm__ ("rep; nop")
-
-
-#else /* Generic i386 implementation */
-
-extern int compare_and_swap_is_available (void);
-
 /* Spinlock implementation; required.  */
 PT_EI long int
 testandset (int *spinlock)
@@ -120,27 +75,43 @@ __compare_and_swap (long int *p, long int oldval, long int newval)
   return ret;
 }
 
+
+PT_EI int get_eflags (void);
+PT_EI int
+get_eflags (void)
+{
+  int res;
+  __asm__ __volatile__ ("pushfl; popl %0" : "=r" (res) : );
+  return res;
+}
+
+
+PT_EI void set_eflags (int newflags);
+PT_EI void
+set_eflags (int newflags)
+{
+  __asm__ __volatile__ ("pushl %0; popfl" : : "r" (newflags) : "cc");
+}
+
+
+PT_EI int compare_and_swap_is_available (void);
 PT_EI int
 compare_and_swap_is_available (void)
 {
+  int oldflags = get_eflags ();
   int changed;
-  int oldflags;
-  /* get EFLAGS */
-  __asm__ __volatile__ ("pushfl; popl %0" : "=r" (oldflags) : );
   /* Flip AC bit in EFLAGS.  */
-  __asm__ __volatile__ ("pushl %0; popfl" : : "r" (oldflags ^ 0x40000) : "cc");
-  /* reread EFLAGS */
-  __asm__ __volatile__ ("pushfl; popl %0" : "=r" (changed) : );
+  set_eflags (oldflags ^ 0x40000);
   /* See if bit changed.  */
-  changed = (changed ^ oldflags) & 0x40000;
+  changed = (get_eflags () ^ oldflags) & 0x40000;
   /* Restore EFLAGS.  */
-  __asm__ __volatile__ ("pushl %0; popfl" : : "r" (oldflags) : "cc");
+  set_eflags (oldflags);
   /* If the AC flag did not change, it's a 386 and it lacks cmpxchg.
      Otherwise, it's a 486 or above and it has cmpxchg.  */
   return changed != 0;
 }
-#endif /* Generic i386 implementation */
-
 #endif /* __ASSEMBLER__ */
 
 #endif /* pt-machine.h */
+
+#endif
diff --git a/libpthread/linuxthreads/sysdeps/i386/tcb-offsets.sym b/libpthread/linuxthreads/sysdeps/i386/tcb-offsets.sym
new file mode 100644
index 000000000..69a5018d8
--- /dev/null
+++ b/libpthread/linuxthreads/sysdeps/i386/tcb-offsets.sym
@@ -0,0 +1,7 @@
+#include <sysdep.h>
+#include <tls.h>
+
+MULTIPLE_THREADS_OFFSET		offsetof (tcbhead_t, multiple_threads)
+#ifdef NEED_DL_SYSINFO
+SYSINFO_OFFSET			offsetof (tcbhead_t, sysinfo)
+#endif
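The rewritten compare_and_swap_is_available probes for cmpxchg by trying to toggle the AC bit (0x40000) in EFLAGS: a 386 cannot set it, a 486 or later can, and only the 486 and later have cmpxchg. The new get_eflags/set_eflags helpers simply factor out the pushfl/popl and pushl/popfl pairs. A standalone version of the same probe (assumes a 32-bit x86 build with GCC inline assembly; the main wrapper is added here only for illustration):

/* AC-bit probe, as done by compare_and_swap_is_available above.  */
#include <stdio.h>

static int
get_eflags (void)
{
  int res;
  __asm__ __volatile__ ("pushfl; popl %0" : "=r" (res));
  return res;
}

static void
set_eflags (int newflags)
{
  __asm__ __volatile__ ("pushl %0; popfl" : : "r" (newflags) : "cc");
}

int
main (void)
{
  int oldflags = get_eflags ();
  int changed;

  set_eflags (oldflags ^ 0x40000);               /* try to flip AC */
  changed = (get_eflags () ^ oldflags) & 0x40000;
  set_eflags (oldflags);                         /* restore EFLAGS */

  printf ("cmpxchg available: %s\n", changed ? "yes" : "no");
  return 0;
}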
diff --git a/libpthread/linuxthreads/sysdeps/i386/tls.h b/libpthread/linuxthreads/sysdeps/i386/tls.h
index 4469f0776..d79bf0779 100644
--- a/libpthread/linuxthreads/sysdeps/i386/tls.h
+++ b/libpthread/linuxthreads/sysdeps/i386/tls.h
@@ -30,7 +30,11 @@
 typedef union dtv
 {
   size_t counter;
-  void *pointer;
+  struct
+  {
+    void *val;
+    bool is_static;
+  } pointer;
 } dtv_t;
 
 
@@ -40,15 +44,28 @@ typedef struct
      thread descriptor used by libpthread.  */
   dtv_t *dtv;
   void *self;		/* Pointer to the thread descriptor.  */
+  int multiple_threads;
+  uintptr_t sysinfo;
 } tcbhead_t;
+
+#else /* __ASSEMBLER__ */
+# include <tcb-offsets.h>
 #endif
 
+/* We can support TLS only if the floating-stack support is available.
+   However, we want to compile in the support and test at runtime whether
+   the running kernel can support it or not.  To avoid bothering with the
+   TLS support code at all, use configure --without-tls.
+
+   We need USE_TLS to be consistently defined, for ldsodefs.h conditionals.
+   But some of the code below can cause problems in building libpthread
+   (e.g. useldt.h will defined FLOATING_STACKS when it shouldn't).  */
 
-/* We can support TLS only if the floating-stack support is available.  */
-#if defined FLOATING_STACKS && defined HAVE_TLS_SUPPORT
+#if defined HAVE_TLS_SUPPORT \
+    && (defined FLOATING_STACKS || !defined IS_IN_libpthread)
 
 /* Signal that TLS support is available.  */
-//# define USE_TLS	1
+# define USE_TLS	1
 
 # ifndef __ASSEMBLER__
 /* Get system call information.  */
@@ -97,7 +114,20 @@ typedef struct
 #  define TLS_LOAD_EBX
 # endif
 
+# if !defined IS_IN_libpthread && !defined DO_MODIFY_LDT
+#  include "useldt.h"	/* For the structure.  */
+# endif
+# if __ASSUME_LDT_WORKS > 0
+#  define TLS_DO_MODIFY_LDT_KERNEL_CHECK(doit) (doit)	/* Nothing to check.  */
+# else
+#  define TLS_DO_MODIFY_LDT_KERNEL_CHECK(doit) \
+  (__builtin_expect (GLRO(dl_osversion) < 131939, 0) \
+   ? "kernel too old for thread-local storage support\n" \
+   : (doit))
+# endif
+
 # define TLS_DO_MODIFY_LDT(descr, nr) \
+TLS_DO_MODIFY_LDT_KERNEL_CHECK( \
 ({ \
   struct modify_ldt_ldt_s ldt_entry = \
     { nr, (unsigned long int) (descr), 0xfffff /* 4GB in pages */, \
@@ -113,8 +143,10 @@ typedef struct
 	   here.  */ \
 	 "m" (ldt_entry), TLS_EBX_ARG (1), "c" (&ldt_entry), \
 	 "d" (sizeof (ldt_entry))); \
-  __builtin_expect (result, 0) != 0 ? -1 : nr * 8 + 7; \
-})
+  __builtin_expect (result, 0) == 0 \
+  ? ({ __asm__ ("movw %w0, %%gs" : : "q" ((nr) * 8 + 7)); NULL; }) \
+  : "cannot set up LDT for thread-local storage\n"; \
+}))
 
 # define TLS_DO_SET_THREAD_AREA(descr, secondcall) \
 ({ \
@@ -135,50 +167,60 @@ typedef struct
 	   to let the compiler know that we are accessing LDT_ENTRY \
 	   here.  */ \
 	 TLS_EBX_ARG (&ldt_entry), "m" (ldt_entry)); \
-  __builtin_expect (result, 0) == 0 ? ldt_entry.entry_number * 8 + 3 : -1; \
+  if (__builtin_expect (result, 0) == 0) \
+    __asm__ ("movw %w0, %%gs" : : "q" (ldt_entry.entry_number * 8 + 3)); \
+  result; \
 })
 
 # ifdef __ASSUME_SET_THREAD_AREA_SYSCALL
-#  define TLS_SETUP_GS_SEGMENT(descr, secondcall) \
-  TLS_DO_SET_THREAD_AREA (descr, firstcall)
+#  define TLS_SETUP_GS_SEGMENT(descr, secondcall) \
+  (TLS_DO_SET_THREAD_AREA (descr, secondcall) \
+   ? "set_thread_area failed when setting up thread-local storage\n" : NULL)
 # elif defined __NR_set_thread_area
 #  define TLS_SETUP_GS_SEGMENT(descr, secondcall) \
-  ({ int __seg = TLS_DO_SET_THREAD_AREA (descr, secondcall); \
-     __seg == -1 ? TLS_DO_MODIFY_LDT (descr, 0) : __seg; })
+  (TLS_DO_SET_THREAD_AREA (descr, secondcall) \
+   ? TLS_DO_MODIFY_LDT (descr, 0) : NULL)
# else
 #  define TLS_SETUP_GS_SEGMENT(descr, secondcall) \
   TLS_DO_MODIFY_LDT ((descr), 0)
 # endif
 
+#if defined NEED_DL_SYSINFO
+# define INIT_SYSINFO \
+  head->sysinfo = GLRO(dl_sysinfo)
+#else
+# define INIT_SYSINFO
+#endif
+
 /* Code to initially initialize the thread pointer.  This might need
    special attention since 'errno' is not yet available and if the
-   operation can cause a failure 'errno' must not be touched.  */
+   operation can cause a failure 'errno' must not be touched.
+
+   The value of this macro is null if successful, or an error string.  */
 # define TLS_INIT_TP(descr, secondcall) \
 ({ \
   void *_descr = (descr); \
   tcbhead_t *head = _descr; \
-  int __gs; \
 \
   head->tcb = _descr; \
   /* For now the thread descriptor is at the same address.  */ \
   head->self = _descr; \
 \
-  __gs = TLS_SETUP_GS_SEGMENT (_descr, secondcall); \
-  if (__builtin_expect (__gs, 7) != -1) \
-    { \
-      __asm__ ("movw %w0, %%gs" : : "q" (__gs)); \
-      __gs = 0; \
-    } \
-  __gs; \
+  INIT_SYSINFO; \
+  TLS_SETUP_GS_SEGMENT (_descr, secondcall); \
 })
 
+/* Indicate that dynamic linker shouldn't try to initialize TLS even
+   when no PT_TLS segments are found in the program and libraries
+   it is linked against.  */
+# define TLS_INIT_TP_EXPENSIVE 1
 
 /* Return the address of the dtv for the current thread.  */
 # define THREAD_DTV() \
   ({ struct _pthread_descr_struct *__descr; \
     THREAD_GETMEM (__descr, p_header.data.dtvp); })
 
-# endif	/* FLOATING_STACKS && HAVE_TLS_SUPPORT */
+# endif	/* HAVE_TLS_SUPPORT && (FLOATING_STACKS || !IS_IN_libpthread) */
 #endif /* __ASSEMBLER__ */
 
 #endif /* tls.h */
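The tls.h changes switch TLS_INIT_TP and TLS_SETUP_GS_SEGMENT from returning a segment selector or -1 to returning NULL on success or a constant error string, and they now load %gs inside TLS_DO_SET_THREAD_AREA / TLS_DO_MODIFY_LDT themselves, using selector entry * 8 + 3 for a GDT slot (or nr * 8 + 7 for an LDT slot). The standalone sketch below (Linux/i386 only, not part of the patch; tcb and sel are invented names) issues the same set_thread_area request through the public syscall interface and derives the selector, but deliberately skips the final movw into %gs because the host C library already owns %gs for its own TLS:

/* Ask the kernel for a GDT entry describing a toy thread control block,
   the way TLS_DO_SET_THREAD_AREA does.  Illustration only.  */
#include <asm/ldt.h>            /* struct user_desc */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

static char tcb[64] __attribute__ ((aligned (16)));

int
main (void)
{
  struct user_desc desc = {
    .entry_number = -1,          /* -1: let the kernel pick a free GDT slot */
    .base_addr = (unsigned long) tcb,
    .limit = 0xfffff,            /* 4 GB, counted in pages */
    .seg_32bit = 1,
    .limit_in_pages = 1,
    .useable = 1,
  };

  if (syscall (SYS_set_thread_area, &desc) != 0)
    {
      perror ("set_thread_area");
      return 1;
    }

  /* Same formula as the patch: selector = entry * 8 + 3 (GDT, ring 3).  */
  unsigned int sel = desc.entry_number * 8 + 3;
  printf ("GDT entry %u -> %%gs selector 0x%x\n", desc.entry_number, sel);
  return 0;
}

Returning an error string rather than an errno value matches the comment added above TLS_INIT_TP: the macro runs before errno is usable, so the caller only gets a message to report and a NULL to indicate success.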
diff --git a/libpthread/linuxthreads/sysdeps/i386/useldt.h b/libpthread/linuxthreads/sysdeps/i386/useldt.h
index 02326729a..067e5e242 100644
--- a/libpthread/linuxthreads/sysdeps/i386/useldt.h
+++ b/libpthread/linuxthreads/sysdeps/i386/useldt.h
@@ -21,6 +21,7 @@
 #ifndef __ASSEMBLER__
 #include <stddef.h>	/* For offsetof.  */
 #include <stdlib.h>	/* For abort().  */
+#include <sysdep.h>
 
 
 /* We don't want to include the kernel header.  So duplicate the
@@ -75,7 +76,7 @@ extern int __modify_ldt (int, struct modify_ldt_ldt_s *, size_t);
 
 #ifdef __PIC__
 # define USETLS_EBX_ARG "r"
-# define USETLS_LOAD_EBX "xchgl %3, %%ebx\n\t"
+# define USETLS_LOAD_EBX "xchgl %1, %%ebx\n\t"
 #else
 # define USETLS_EBX_ARG "b"
 # define USETLS_LOAD_EBX
@@ -85,7 +86,7 @@ extern int __modify_ldt (int, struct modify_ldt_ldt_s *, size_t);
    because we inherited the value set up in the main thread by TLS setup.
    We need to extract that value and set up the same segment in this
    thread.  */
-#if USE_TLS
+#ifdef __UCLIBC_HAS_TLS__
 # define DO_SET_THREAD_AREA_REUSE(nr)	1
 #else
 /* Without TLS, we do the initialization of the main thread, where NR == 0.  */
@@ -107,8 +108,10 @@ extern int __modify_ldt (int, struct modify_ldt_ldt_s *, size_t);
      "movl %2, %%eax\n\t" \
      "int $0x80\n\t" \
      USETLS_LOAD_EBX \
-     : "&a" (__result) \
-     : USETLS_EBX_ARG (&ldt_entry), "i" (__NR_set_thread_area)); \
+     : "=&a" (__result) \
+     : USETLS_EBX_ARG (&ldt_entry), "i" (__NR_set_thread_area), \
+       "m" (ldt_entry) \
+     : "memory"); \
   if (__result == 0) \
     __asm__ ("movw %w0, %%gs" :: "q" (__gs)); \
   else \
@@ -125,8 +128,10 @@ extern int __modify_ldt (int, struct modify_ldt_ldt_s *, size_t);
      "movl %2, %%eax\n\t" \
      "int $0x80\n\t" \
      USETLS_LOAD_EBX \
-     : "&a" (__result) \
-     : USETLS_EBX_ARG (&ldt_entry), "i" (__NR_set_thread_area)); \
+     : "=&a" (__result) \
+     : USETLS_EBX_ARG (&ldt_entry), "i" (__NR_set_thread_area), \
+       "m" (ldt_entry) \
+     : "memory"); \
   if (__result == 0) \
     { \
       __gs = (ldt_entry.entry_number << 3) + 3; \
@@ -299,8 +304,10 @@ extern int __have_no_set_thread_area;
 })
 #endif
 
+#if __ASSUME_LDT_WORKS > 0
 /* We want the OS to assign stack addresses.  */
 #define FLOATING_STACKS 1
 
 /* Maximum size of the stack if the rlimit is unlimited.  */
 #define ARCH_STACK_MAX_SIZE	8*1024*1024
+#endif
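The useldt.h hunks also correct the inline-assembly constraints around the raw int $0x80 set_thread_area call: the result needs an "=&a" early-clobber output (the old "&a" lacked the '=' every output constraint requires), the descriptor the kernel reads is now listed as an "m" input, and a "memory" clobber keeps GCC from caching values across the system call. The following standalone illustration of that constraint pattern is not part of the patch; it substitutes the harmless uname system call for set_thread_area and assumes a non-PIC 32-bit x86 build so %ebx is free to carry the argument, as in the non-__PIC__ branch of the macro:

/* int $0x80 with the fixed constraint style: early-clobber %eax output,
   the structure as a memory operand, and a "memory" clobber.  */
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/utsname.h>

int
main (void)
{
  struct utsname buf;
  int result;

  __asm__ __volatile__ ("movl %2, %%eax\n\t"
                        "int $0x80"
                        : "=&a" (result), "=m" (buf)   /* eax: return value; buf is written */
                        : "i" (__NR_uname), "b" (&buf) /* syscall number; pointer in %ebx */
                        : "memory");

  if (result == 0)
    printf ("%s %s\n", buf.sysname, buf.release);
  else
    printf ("uname failed: %d\n", result);
  return 0;
}

Listing ldt_entry (here buf) as an operand is what tells the compiler the structure is actually read or written around the int $0x80, so it cannot dead-store or reorder the initialisation of the descriptor.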