summary refs log tree commit diff
path: root/libpthread/linuxthreads/sysdeps/powerpc
diff options
context:
space:
mode:
author Waldemar Brodkorb <wbx@openadk.org> 2015-02-14 23:04:02 -0600
committer Waldemar Brodkorb <wbx@openadk.org> 2015-02-14 23:04:02 -0600
commit e1e46622ac0fce73d802fa4a8a2e83cc25cd9e7a (patch)
tree 2c8d14e4dcc733f4227f0984ed05da5826b13acd /libpthread/linuxthreads/sysdeps/powerpc
parent 6b6ede3d15f04fe825cfa9f697507457e3640344 (diff)
Revert "resolve merge"
This reverts commit 6b6ede3d15f04fe825cfa9f697507457e3640344.
Diffstat (limited to 'libpthread/linuxthreads/sysdeps/powerpc')
-rw-r--r-- libpthread/linuxthreads/sysdeps/powerpc/powerpc32/pspinlock.c  |  69
-rw-r--r-- libpthread/linuxthreads/sysdeps/powerpc/powerpc32/pt-machine.h | 119
-rw-r--r-- libpthread/linuxthreads/sysdeps/powerpc/powerpc64/pspinlock.c  |  69
-rw-r--r-- libpthread/linuxthreads/sysdeps/powerpc/powerpc64/pt-machine.h | 184
-rw-r--r-- libpthread/linuxthreads/sysdeps/powerpc/pspinlock.c            |   8
-rw-r--r-- libpthread/linuxthreads/sysdeps/powerpc/pt-machine.h           | 103
-rw-r--r-- libpthread/linuxthreads/sysdeps/powerpc/tcb-offsets.sym        |  19
-rw-r--r-- libpthread/linuxthreads/sysdeps/powerpc/tls.h                  | 164
8 files changed, 637 insertions(+), 98 deletions(-)
diff --git a/libpthread/linuxthreads/sysdeps/powerpc/powerpc32/pspinlock.c b/libpthread/linuxthreads/sysdeps/powerpc/powerpc32/pspinlock.c
new file mode 100644
index 000000000..875aa3876
--- /dev/null
+++ b/libpthread/linuxthreads/sysdeps/powerpc/powerpc32/pspinlock.c
@@ -0,0 +1,69 @@
+/* POSIX spinlock implementation. PowerPC version.
+ Copyright (C) 2000, 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If
+ not, see <http://www.gnu.org/licenses/>. */
+
+#include <errno.h>
+#include <pthread.h>
+#include "internals.h"
+
+int
+__pthread_spin_lock (pthread_spinlock_t *lock)
+{
+ while (! __compare_and_swap ((long int *)lock, 0, 1))
+ ;
+ return 0;
+}
+weak_alias (__pthread_spin_lock, pthread_spin_lock)
+
+
+int
+__pthread_spin_trylock (pthread_spinlock_t *lock)
+{
+ return __compare_and_swap ((long int *)lock, 0, 1) ? 0 : EBUSY;
+}
+weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
+
+
+int
+__pthread_spin_unlock (pthread_spinlock_t *lock)
+{
+ MEMORY_BARRIER ();
+ *lock = 0;
+ return 0;
+}
+weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
+
+
+int
+__pthread_spin_init (pthread_spinlock_t *lock, int pshared)
+{
+ /* We can ignore the `pshared' parameter. Since we are busy-waiting
+ all processes which can access the memory location `lock' points
+ to can use the spinlock. */
+ *lock = 0;
+ return 0;
+}
+weak_alias (__pthread_spin_init, pthread_spin_init)
+
+
+int
+__pthread_spin_destroy (pthread_spinlock_t *lock)
+{
+ /* Nothing to do. */
+ return 0;
+}
+weak_alias (__pthread_spin_destroy, pthread_spin_destroy)
diff --git a/libpthread/linuxthreads/sysdeps/powerpc/powerpc32/pt-machine.h b/libpthread/linuxthreads/sysdeps/powerpc/powerpc32/pt-machine.h
new file mode 100644
index 000000000..a2b8b61e6
--- /dev/null
+++ b/libpthread/linuxthreads/sysdeps/powerpc/powerpc32/pt-machine.h
@@ -0,0 +1,119 @@
+/* Machine-dependent pthreads configuration and inline functions.
+ powerpc version.
+ Copyright (C) 1996, 1997, 1998, 2000, 2001, 2002, 2003
+ Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If
+ not, see <http://www.gnu.org/licenses/>. */
+
+/* These routines are from Appendix G of the 'PowerPC 601 RISC Microprocessor
+ User's Manual', by IBM and Motorola. */
+
+#ifndef _PT_MACHINE_H
+#define _PT_MACHINE_H 1
+
+#ifndef PT_EI
+# define PT_EI __extern_always_inline
+#endif
+
+extern long int testandset (int *spinlock);
+extern int __compare_and_swap (long int *p, long int oldval, long int newval);
+
+/* For multiprocessor systems, we want to ensure all memory accesses
+ are completed before we reset a lock. On other systems, we still
+ need to make sure that the compiler has flushed everything to memory. */
+#define MEMORY_BARRIER() __asm__ __volatile__ ("sync" : : : "memory")
+
+/* We want the OS to assign stack addresses. */
+#define FLOATING_STACKS 1
+
+/* Maximum size of the stack if the rlimit is unlimited. */
+#define ARCH_STACK_MAX_SIZE 8*1024*1024
+
+/* Get some notion of the current stack. Need not be exactly the top
+ of the stack, just something somewhere in the current frame. */
+#define CURRENT_STACK_FRAME stack_pointer
+register char * stack_pointer __asm__ ("r1");
+
+/* Register r2 (tp) is reserved by the ABI as "thread pointer". */
+struct _pthread_descr_struct;
+register struct _pthread_descr_struct *__thread_self __asm__("r2");
+
+/* Return the thread descriptor for the current thread. */
+#define THREAD_SELF __thread_self
+
+/* Initialize the thread-unique value. */
+#define INIT_THREAD_SELF(descr, nr) (__thread_self = (descr))
+
+/* Access to data in the thread descriptor is easy. */
+#define THREAD_GETMEM(descr, member) \
+ ((void) (descr), THREAD_SELF->member)
+#define THREAD_GETMEM_NC(descr, member) \
+ ((void) (descr), THREAD_SELF->member)
+#define THREAD_SETMEM(descr, member, value) \
+ ((void) (descr), THREAD_SELF->member = (value))
+#define THREAD_SETMEM_NC(descr, member, value) \
+ ((void) (descr), THREAD_SELF->member = (value))
+
+/* Compare-and-swap for semaphores. */
+/* note that test-and-set(x) is the same as !compare-and-swap(x, 0, 1) */
+
+#define HAS_COMPARE_AND_SWAP_WITH_RELEASE_SEMANTICS
+#define IMPLEMENT_TAS_WITH_CAS
+
+PT_EI int
+__compare_and_swap (long int *p, long int oldval, long int newval)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ "0: lwarx %0,0,%1 ;"
+ " xor. %0,%3,%0;"
+ " bne 1f;"
+ " stwcx. %2,0,%1;"
+ " bne- 0b;"
+ "1: "
+ : "=&r"(ret)
+ : "r"(p), "r"(newval), "r"(oldval)
+ : "cr0", "memory");
+ /* This version of __compare_and_swap is to be used when acquiring
+ a lock, so we don't need to worry about whether other memory
+ operations have completed, but we do need to be sure that any loads
+ after this point really occur after we have acquired the lock. */
+ __asm__ __volatile__ ("isync" : : : "memory");
+ return ret == 0;
+}
+
+PT_EI int
+__compare_and_swap_with_release_semantics (long int *p,
+ long int oldval, long int newval)
+{
+ int ret;
+
+ MEMORY_BARRIER ();
+ __asm__ __volatile__ (
+ "0: lwarx %0,0,%1 ;"
+ " xor. %0,%3,%0;"
+ " bne 1f;"
+ " stwcx. %2,0,%1;"
+ " bne- 0b;"
+ "1: "
+ : "=&r"(ret)
+ : "r"(p), "r"(newval), "r"(oldval)
+ : "cr0", "memory");
+ return ret == 0;
+}
+
+#endif /* pt-machine.h */
diff --git a/libpthread/linuxthreads/sysdeps/powerpc/powerpc64/pspinlock.c b/libpthread/linuxthreads/sysdeps/powerpc/powerpc64/pspinlock.c
new file mode 100644
index 000000000..f588c62c7
--- /dev/null
+++ b/libpthread/linuxthreads/sysdeps/powerpc/powerpc64/pspinlock.c
@@ -0,0 +1,69 @@
+/* POSIX spinlock implementation. PowerPC version.
+ Copyright (C) 2000, 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If
+ not, see <http://www.gnu.org/licenses/>. */
+
+#include <errno.h>
+#include <pthread.h>
+#include "internals.h"
+
+int
+__pthread_spin_lock (pthread_spinlock_t *lock)
+{
+ while (! __compare_and_swap32 ((int *)lock, 0, 1))
+ ;
+ return 0;
+}
+weak_alias (__pthread_spin_lock, pthread_spin_lock)
+
+
+int
+__pthread_spin_trylock (pthread_spinlock_t *lock)
+{
+ return __compare_and_swap32 ((int *)lock, 0, 1) ? 0 : EBUSY;
+}
+weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
+
+
+int
+__pthread_spin_unlock (pthread_spinlock_t *lock)
+{
+ MEMORY_BARRIER ();
+ *lock = 0;
+ return 0;
+}
+weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
+
+
+int
+__pthread_spin_init (pthread_spinlock_t *lock, int pshared)
+{
+ /* We can ignore the `pshared' parameter. Since we are busy-waiting
+ all processes which can access the memory location `lock' points
+ to can use the spinlock. */
+ *lock = 0;
+ return 0;
+}
+weak_alias (__pthread_spin_init, pthread_spin_init)
+
+
+int
+__pthread_spin_destroy (pthread_spinlock_t *lock)
+{
+ /* Nothing to do. */
+ return 0;
+}
+weak_alias (__pthread_spin_destroy, pthread_spin_destroy)
diff --git a/libpthread/linuxthreads/sysdeps/powerpc/powerpc64/pt-machine.h b/libpthread/linuxthreads/sysdeps/powerpc/powerpc64/pt-machine.h
new file mode 100644
index 000000000..b9193a871
--- /dev/null
+++ b/libpthread/linuxthreads/sysdeps/powerpc/powerpc64/pt-machine.h
@@ -0,0 +1,184 @@
+/* Machine-dependent pthreads configuration and inline functions.
+ powerpc version.
+ Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If
+ not, see <http://www.gnu.org/licenses/>. */
+
+/* These routines are from Appendix G of the 'PowerPC 601 RISC Microprocessor
+ User's Manual', by IBM and Motorola. */
+
+#ifndef _PT_MACHINE_H
+#define _PT_MACHINE_H 1
+
+#ifndef PT_EI
+# define PT_EI __extern_always_inline
+#endif
+
+extern long int testandset (int *spinlock);
+extern int __compare_and_swap (long int *p, long int oldval, long int newval);
+extern int __compare_and_swap32 (int *p, int oldval, int newval);
+
+/* For multiprocessor systems, we want to ensure all memory accesses
+ are completed before we reset a lock. On other systems, we still
+ need to make sure that the compiler has flushed everything to memory. */
+#define MEMORY_BARRIER() __asm__ __volatile__ ("lwsync" : : : "memory")
+#define READ_MEMORY_BARRIER() __asm__ __volatile__ ("lwsync" : : : "memory")
+#define WRITE_MEMORY_BARRIER() __asm__ __volatile__ ("eieio" : : : "memory")
+
+/* We want the OS to assign stack addresses. */
+#define FLOATING_STACKS 1
+
+/* Maximum size of the stack if the rlimit is unlimited. */
+#define ARCH_STACK_MAX_SIZE 16*1024*1024
+
+/* Get some notion of the current stack. Need not be exactly the top
+ of the stack, just something somewhere in the current frame. */
+#define CURRENT_STACK_FRAME stack_pointer
+register char * stack_pointer __asm__ ("r1");
+
+/* Register r13 (tp) is reserved by the ABI as "thread pointer". */
+struct _pthread_descr_struct;
+register struct _pthread_descr_struct *__thread_self __asm__("r13");
+
+/* Return the thread descriptor for the current thread. */
+#define THREAD_SELF __thread_self
+
+/* Initialize the thread-unique value. */
+#define INIT_THREAD_SELF(descr, nr) (__thread_self = (descr))
+
+/* Access to data in the thread descriptor is easy. */
+#define THREAD_GETMEM(descr, member) \
+ ((void) (descr), THREAD_SELF->member)
+#define THREAD_GETMEM_NC(descr, member) \
+ ((void) (descr), THREAD_SELF->member)
+#define THREAD_SETMEM(descr, member, value) \
+ ((void) (descr), THREAD_SELF->member = (value))
+#define THREAD_SETMEM_NC(descr, member, value) \
+ ((void) (descr), THREAD_SELF->member = (value))
+
+/* Compare-and-swap for semaphores. */
+/* note that test-and-set(x) is the same as !compare-and-swap(x, 0, 1) */
+
+#define HAS_COMPARE_AND_SWAP
+#define HAS_COMPARE_AND_SWAP_WITH_RELEASE_SEMANTICS
+
+PT_EI int
+__compare_and_swap (long int *p, long int oldval, long int newval)
+{
+ long int ret;
+
+ __asm__ __volatile__ (
+ "0: ldarx %0,0,%1 ;"
+ " xor. %0,%3,%0;"
+ " bne 1f;"
+ " stdcx. %2,0,%1;"
+ " bne- 0b;"
+ "1: "
+ : "=&r"(ret)
+ : "r"(p), "r"(newval), "r"(oldval)
+ : "cr0", "memory");
+ /* This version of __compare_and_swap is to be used when acquiring
+ a lock, so we don't need to worry about whether other memory
+ operations have completed, but we do need to be sure that any loads
+ after this point really occur after we have acquired the lock. */
+ __asm__ __volatile__ ("isync" : : : "memory");
+ return (int)(ret == 0);
+}
+
+PT_EI int
+__compare_and_swap_with_release_semantics (long int *p,
+ long int oldval, long int newval)
+{
+ long int ret;
+
+ MEMORY_BARRIER ();
+ __asm__ __volatile__ (
+ "0: ldarx %0,0,%1 ;"
+ " xor. %0,%3,%0;"
+ " bne 1f;"
+ " stdcx. %2,0,%1;"
+ " bne- 0b;"
+ "1: "
+ : "=&r"(ret)
+ : "r"(p), "r"(newval), "r"(oldval)
+ : "cr0", "memory");
+ return (int)(ret == 0);
+}
+
+PT_EI int
+__compare_and_swap32 (int *p, int oldval, int newval)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ "0: lwarx %0,0,%1 ;"
+ " xor. %0,%3,%0;"
+ " bne 1f;"
+ " stwcx. %2,0,%1;"
+ " bne- 0b;"
+ "1: "
+ : "=&r"(ret)
+ : "r"(p), "r"(newval), "r"(oldval)
+ : "cr0", "memory");
+ /* This version of __compare_and_swap is to be used when acquiring
+ a lock, so we don't need to worry about whether other memory
+ operations have completed, but we do need to be sure that any loads
+ after this point really occur after we have acquired the lock. */
+ __asm__ __volatile__ ("isync" : : : "memory");
+ return (int)(ret == 0);
+}
+
+PT_EI int
+__compare_and_swap32_with_release_semantics (long int *p,
+ long int oldval, long int newval)
+{
+ long int ret;
+
+ MEMORY_BARRIER ();
+ __asm__ __volatile__ (
+ "0: lwarx %0,0,%1 ;"
+ " xor. %0,%3,%0;"
+ " bne 1f;"
+ " stwcx. %2,0,%1;"
+ " bne- 0b;"
+ "1: "
+ : "=&r"(ret)
+ : "r"(p), "r"(newval), "r"(oldval)
+ : "cr0", "memory");
+ return (int)(ret == 0);
+}
+
+PT_EI long int
+testandset (int *p)
+{
+ long int ret, val = 1;
+
+ MEMORY_BARRIER ();
+ __asm__ __volatile__ (
+ "0: lwarx %0,0,%1 ;"
+ " cmpwi 0,%0,0;"
+ " bne 1f;"
+ " stwcx. %2,0,%1;"
+ " bne- 0b;"
+ "1: "
+ : "=&r"(ret)
+ : "r"(p), "r" (val)
+ : "cr0", "memory");
+ MEMORY_BARRIER ();
+ return ret != 0;
+}
+
+#endif /* pt-machine.h */
diff --git a/libpthread/linuxthreads/sysdeps/powerpc/pspinlock.c b/libpthread/linuxthreads/sysdeps/powerpc/pspinlock.c
new file mode 100644
index 000000000..bb88a0690
--- /dev/null
+++ b/libpthread/linuxthreads/sysdeps/powerpc/pspinlock.c
@@ -0,0 +1,8 @@
+#include <features.h>
+#include <bits/wordsize.h>
+
+#if __WORDSIZE == 32
+# include "powerpc32/pspinlock.c"
+#else
+# include "powerpc64/pspinlock.c"
+#endif
diff --git a/libpthread/linuxthreads/sysdeps/powerpc/pt-machine.h b/libpthread/linuxthreads/sysdeps/powerpc/pt-machine.h
index aa2d206b0..55e922efd 100644
--- a/libpthread/linuxthreads/sysdeps/powerpc/pt-machine.h
+++ b/libpthread/linuxthreads/sysdeps/powerpc/pt-machine.h
@@ -1,101 +1,8 @@
-/* Machine-dependent pthreads configuration and inline functions.
- powerpc version.
- Copyright (C) 1996, 1997, 1998, 2000, 2001, 2002 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public License as
- published by the Free Software Foundation; either version 2.1 of the
- License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; see the file COPYING.LIB. If
- not, see <http://www.gnu.org/licenses/>. */
-
-/* These routines are from Appendix G of the 'PowerPC 601 RISC Microprocessor
- User's Manual', by IBM and Motorola. */
-
-#ifndef _PT_MACHINE_H
-#define _PT_MACHINE_H 1
-
#include <features.h>
+#include <bits/wordsize.h>
-#ifndef PT_EI
-# define PT_EI __extern_always_inline
+#if __WORDSIZE == 32
+# include "powerpc32/pt-machine.h"
+#else
+# include "powerpc64/pt-machine.h"
#endif
-
-/* For multiprocessor systems, we want to ensure all memory accesses
- are completed before we reset a lock. On other systems, we still
- need to make sure that the compiler has flushed everything to memory. */
-#define MEMORY_BARRIER() __asm__ __volatile__ ("sync" : : : "memory")
-
-/* Get some notion of the current stack. Need not be exactly the top
- of the stack, just something somewhere in the current frame. */
-#define CURRENT_STACK_FRAME stack_pointer
-register char * stack_pointer __asm__ ("r1");
-
-/* Register r2 (tp) is reserved by the ABI as "thread pointer". */
-struct _pthread_descr_struct;
-register struct _pthread_descr_struct *__thread_self __asm__("r2");
-
-/* Return the thread descriptor for the current thread. */
-#define THREAD_SELF __thread_self
-
-/* Initialize the thread-unique value. */
-#define INIT_THREAD_SELF(descr, nr) (__thread_self = (descr))
-
-/* Compare-and-swap for semaphores. */
-/* note that test-and-set(x) is the same as !compare-and-swap(x, 0, 1) */
-
-#define HAS_COMPARE_AND_SWAP_WITH_RELEASE_SEMANTICS
-#define IMPLEMENT_TAS_WITH_CAS
-
-PT_EI int
-__compare_and_swap (long int *p, long int oldval, long int newval)
-{
- int ret;
-
- __asm__ __volatile__ (
- "0: lwarx %0,0,%1 ;"
- " xor. %0,%3,%0;"
- " bne 1f;"
- " stwcx. %2,0,%1;"
- " bne- 0b;"
- "1: "
- : "=&r"(ret)
- : "r"(p), "r"(newval), "r"(oldval)
- : "cr0", "memory");
- /* This version of __compare_and_swap is to be used when acquiring
- a lock, so we don't need to worry about whether other memory
- operations have completed, but we do need to be sure that any loads
- after this point really occur after we have acquired the lock. */
- __asm__ __volatile__ ("isync" : : : "memory");
- return ret == 0;
-}
-
-PT_EI int
-__compare_and_swap_with_release_semantics (long int *p,
- long int oldval, long int newval)
-{
- int ret;
-
- MEMORY_BARRIER ();
- __asm__ __volatile__ (
- "0: lwarx %0,0,%1 ;"
- " xor. %0,%3,%0;"
- " bne 1f;"
- " stwcx. %2,0,%1;"
- " bne- 0b;"
- "1: "
- : "=&r"(ret)
- : "r"(p), "r"(newval), "r"(oldval)
- : "cr0", "memory");
- return ret == 0;
-}
-
-#endif /* pt-machine.h */
diff --git a/libpthread/linuxthreads/sysdeps/powerpc/tcb-offsets.sym b/libpthread/linuxthreads/sysdeps/powerpc/tcb-offsets.sym
new file mode 100644
index 000000000..7940cf620
--- /dev/null
+++ b/libpthread/linuxthreads/sysdeps/powerpc/tcb-offsets.sym
@@ -0,0 +1,19 @@
+#include <sysdep.h>
+#include <tls.h>
+
+-- This line separates the #include lines from conditionals.
+
+# ifdef __UCLIBC_HAS_TLS__
+
+-- Abuse tls.h macros to derive offsets relative to the thread register.
+# undef __thread_register
+# define __thread_register ((void *) 0)
+# define thread_offsetof(mem) ((ptrdiff_t) THREAD_SELF + offsetof (struct _pthread_descr_struct, p_##mem))
+
+# else
+
+# define thread_offsetof(mem) offsetof (tcbhead_t, mem)
+
+# endif
+
+MULTIPLE_THREADS_OFFSET thread_offsetof (multiple_threads)
diff --git a/libpthread/linuxthreads/sysdeps/powerpc/tls.h b/libpthread/linuxthreads/sysdeps/powerpc/tls.h
new file mode 100644
index 000000000..8555b239d
--- /dev/null
+++ b/libpthread/linuxthreads/sysdeps/powerpc/tls.h
@@ -0,0 +1,164 @@
+/* Definitions for thread-local data handling. linuxthreads/PPC version.
+ Copyright (C) 2003, 2005 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _TLS_H
+#define _TLS_H
+
+#ifndef __ASSEMBLER__
+
+# include <pt-machine.h>
+# include <stdbool.h>
+# include <stddef.h>
+
+/* Type for the dtv. */
+typedef union dtv
+{
+ size_t counter;
+ struct
+ {
+ void *val;
+ bool is_static;
+ } pointer;
+} dtv_t;
+
+#else /* __ASSEMBLER__ */
+# include <tcb-offsets.h>
+#endif /* __ASSEMBLER__ */
+
+#ifdef HAVE_TLS_SUPPORT
+
+/* Signal that TLS support is available. */
+# define USE_TLS 1
+
+# ifndef __ASSEMBLER__
+
+/* This layout is actually wholly private and not affected by the ABI.
+ Nor does it overlap the pthread data structure, so we need nothing
+ extra here at all. */
+typedef struct
+{
+ dtv_t *dtv;
+} tcbhead_t;
+
+/* This is the size of the initial TCB. */
+# define TLS_INIT_TCB_SIZE 0
+
+/* Alignment requirements for the initial TCB. */
+# define TLS_INIT_TCB_ALIGN __alignof__ (struct _pthread_descr_struct)
+
+/* This is the size of the TCB. */
+# define TLS_TCB_SIZE 0
+
+/* Alignment requirements for the TCB. */
+# define TLS_TCB_ALIGN __alignof__ (struct _pthread_descr_struct)
+
+/* This is the size we need before TCB. */
+# define TLS_PRE_TCB_SIZE \
+ (sizeof (struct _pthread_descr_struct) \
+ + ((sizeof (tcbhead_t) + TLS_TCB_ALIGN - 1) & ~(TLS_TCB_ALIGN - 1)))
+
+/* The following assumes that TP (R2 or R13) points to the end of the
+ TCB + 0x7000 (per the ABI). This implies that TCB address is
+ TP - 0x7000. As we define TLS_DTV_AT_TP we can
+ assume that the pthread_descr is allocated immediately ahead of the
+ TCB. This implies that the pthread_descr address is
+ TP - (TLS_PRE_TCB_SIZE + 0x7000). */
+#define TLS_TCB_OFFSET 0x7000
+
+/* The DTV is allocated at the TP; the TCB is placed elsewhere. */
+/* This is not really true for powerpc64. We are following alpha
+ where the DTV pointer is first doubleword in the TCB. */
+# define TLS_DTV_AT_TP 1
+
+/* Install the dtv pointer. The pointer passed is to the element with
+   index -1 which contains the length.  */
+# define INSTALL_DTV(TCBP, DTVP) \
+ (((tcbhead_t *) (TCBP))[-1].dtv = (DTVP) + 1)
+
+/* Install new dtv for current thread. */
+# define INSTALL_NEW_DTV(DTV) (THREAD_DTV() = (DTV))
+
+/* Return dtv of given thread descriptor. */
+# define GET_DTV(TCBP) (((tcbhead_t *) (TCBP))[-1].dtv)
+
+/* We still need this define so that tcb-offsets.sym can override it and
+ use THREAD_SELF to generate MULTIPLE_THREADS_OFFSET. */
+# define __thread_register ((void *) __thread_self)
+
+/* Code to initially initialize the thread pointer. This might need
+ special attention since 'errno' is not yet available and if the
+ operation can cause a failure 'errno' must not be touched.
+
+ The global register variable is declared in pt-machine.h with the
+ wrong type, so we need some extra casts to get the desired result.
+ This avoids a lvalue cast that gcc-3.4 does not like. */
+# define TLS_INIT_TP(TCBP, SECONDCALL) \
+ (__thread_self = (struct _pthread_descr_struct *) \
+ ((void *) (TCBP) + TLS_TCB_OFFSET), NULL)
+
+/* Return the address of the dtv for the current thread. */
+# define THREAD_DTV() \
+ (((tcbhead_t *) ((void *) __thread_self - TLS_TCB_OFFSET))[-1].dtv)
+
+/* Return the thread descriptor for the current thread. */
+# undef THREAD_SELF
+# define THREAD_SELF \
+ ((pthread_descr) (__thread_register \
+ - TLS_TCB_OFFSET - TLS_PRE_TCB_SIZE))
+
+# undef INIT_THREAD_SELF
+# define INIT_THREAD_SELF(DESCR, NR) \
+ (__thread_self = (struct _pthread_descr_struct *)((void *) (DESCR) \
+ + TLS_TCB_OFFSET + TLS_PRE_TCB_SIZE))
+
+/* Make sure we have the p_multiple_threads member in the thread structure.
+ See below. */
+# define TLS_MULTIPLE_THREADS_IN_TCB 1
+
+/* Get the thread descriptor definition. */
+# include <linuxthreads/descr.h>
+
+/* l_tls_offset == 0 is perfectly valid on PPC, so we have to use some
+ different value to mean unset l_tls_offset. */
+# define NO_TLS_OFFSET -1
+
+# endif /* __ASSEMBLER__ */
+
+#elif !defined __ASSEMBLER__
+
+/* This overlaps the start of the pthread_descr. System calls
+ and such use this to find the multiple_threads flag and need
+ to use the same offset relative to the thread register in both
+ single-threaded and multi-threaded code. */
+typedef struct
+{
+ void *tcb; /* Never used. */
+ dtv_t *dtv; /* Never used. */
+ void *self; /* Used only if multithreaded, and rarely. */
+ int multiple_threads; /* Only this member is really used. */
+} tcbhead_t;
+
+#define NONTLS_INIT_TP \
+ do { \
+ static const tcbhead_t nontls_init_tp = { .multiple_threads = 0 }; \
+ __thread_self = (__typeof (__thread_self)) &nontls_init_tp; \
+ } while (0)
+
+#endif /* HAVE_TLS_SUPPORT */
+
+#endif /* tls.h */