author    | Bernhard Reutner-Fischer <rep.dot.nop@gmail.com> | 2015-03-26 23:12:38 +0100
committer | Bernhard Reutner-Fischer <rep.dot.nop@gmail.com> | 2015-03-27 00:08:46 +0100
commit    | cc8d7d062e2db0a1ed3973e6af38cd1938ba74d7 (patch)
tree      | 9e2db5741cbcb01a246821cc4fc9dc4df8e263b4 /include/atomic.h
parent    | 560f416794403be952e850756d7e8b23e8fa92ac (diff)
include: update atomic.h from glibc
Signed-off-by: Bernhard Reutner-Fischer <rep.dot.nop@gmail.com>
Diffstat (limited to 'include/atomic.h')
-rw-r--r-- | include/atomic.h | 235
1 files changed, 231 insertions, 4 deletions
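
The patch below imports glibc's new C11-style atomic_OP_MO macros, where atomic_OP_MO(args) is usually equivalent to C11's atomic_OP_explicit(args, memory_order_MO). As a rough illustration of what the compiler-builtins path of those macros expands to (the "#if 0 /* not yet used USE_ATOMIC_COMPILER_BUILTINS */" block in the diff; note it is not wired up yet), here is a minimal standalone C sketch written directly against the GCC __atomic built-ins that this path wraps. The variable names refcount and ready are hypothetical and not part of the patch.

/* Minimal sketch, not part of the patch: shows what the (not yet enabled)
   USE_ATOMIC_COMPILER_BUILTINS path would expand to, using the GCC __atomic
   built-ins it wraps.  refcount and ready are hypothetical names.
   Build with: gcc -std=gnu99 example.c */
#include <stdio.h>

static int refcount;  /* shared counter; 32-bit, as the patch requires */
static int ready;     /* flag published with release/acquire ordering */

int main (void)
{
  /* atomic_fetch_add_relaxed (&refcount, 1) is defined as
     __atomic_fetch_add (&refcount, 1, __ATOMIC_RELAXED).  */
  int old = __atomic_fetch_add (&refcount, 1, __ATOMIC_RELAXED);

  /* atomic_store_release (&ready, 1) is defined as
     __atomic_store_n (&ready, 1, __ATOMIC_RELEASE).  */
  __atomic_store_n (&ready, 1, __ATOMIC_RELEASE);

  /* atomic_load_acquire (&ready) is defined as
     __atomic_load_n (&ready, __ATOMIC_ACQUIRE).  */
  int seen = __atomic_load_n (&ready, __ATOMIC_ACQUIRE);

  printf ("old=%d seen=%d\n", old, seen);
  return 0;
}

In a real multi-threaded caller, the release store pairs with the acquire load so that writes made before the store are visible to the thread that observes the flag.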
diff --git a/include/atomic.h b/include/atomic.h
index 307704cba..3680d8714 100644
--- a/include/atomic.h
+++ b/include/atomic.h
@@ -1,5 +1,5 @@
 /* Internal macros for atomic operations for GNU C Library.
-   Copyright (C) 2002-2006, 2009 Free Software Foundation, Inc.
+   Copyright (C) 2002-2015 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -32,7 +32,7 @@
      the multi-thread case.  The interfaces have the prefix
      "catomic_".
 
-   - support functions like barriers.  They also have the preifx
+   - support functions like barriers.  They also have the prefix
      "atomic_".
 
    Architectures must provide a few lowlevel macros (the compare
@@ -198,8 +198,12 @@
 
 /* Add VALUE to *MEM and return the old value of *MEM.  */
-#ifndef atomic_exchange_and_add
-# define atomic_exchange_and_add(mem, value) \
+#ifndef atomic_exchange_and_add_acq
+# ifdef atomic_exchange_and_add
+#  define atomic_exchange_and_add_acq(mem, value) \
+  atomic_exchange_and_add (mem, value)
+# else
+#  define atomic_exchange_and_add_acq(mem, value) \
   ({ __typeof (*(mem)) __atg6_oldval; \
      __typeof (mem) __atg6_memp = (mem); \
     __typeof (*(mem)) __atg6_value = (value); \
@@ -213,8 +217,18 @@
 						    __atg6_oldval), 0)); \
 \
      __atg6_oldval; })
+# endif
 #endif
 
+#ifndef atomic_exchange_and_add_rel
+# define atomic_exchange_and_add_rel(mem, value) \
+  atomic_exchange_and_add_acq(mem, value)
+#endif
+
+#ifndef atomic_exchange_and_add
+# define atomic_exchange_and_add(mem, value) \
+  atomic_exchange_and_add_acq(mem, value)
+#endif
 
 #ifndef catomic_exchange_and_add
 # define catomic_exchange_and_add(mem, value) \
@@ -528,6 +542,219 @@
   ({ __typeof (x) __x; __asm__ ("" : "=r" (__x) : "0" (x)); __x; })
 #endif
 
+/* This is equal to 1 iff the architecture supports 64b atomic operations.  */
+#define __HAVE_64B_ATOMICS 0 /* TODO: not yet used - Add these to arch bits! */
+#ifndef __HAVE_64B_ATOMICS
+#error Unable to determine if 64-bit atomics are present.
+#endif
+
+/* The following functions are a subset of the atomic operations provided by
+   C11.  Usually, a function named atomic_OP_MO(args) is equivalent to C11's
+   atomic_OP_explicit(args, memory_order_MO); exceptions noted below.  */
+
+/* Each arch can request to use compiler built-ins for C11 atomics.  If it
+   does, all atomics will be based on these.  */
+#if 0 /* not yet used USE_ATOMIC_COMPILER_BUILTINS */
+
+/* We require 32b atomic operations; some archs also support 64b atomic
+   operations.  */
+void __atomic_link_error (void);
+# if __HAVE_64B_ATOMICS == 1
+#  define __atomic_check_size(mem) \
+   if ((sizeof (*mem) != 4) && (sizeof (*mem) != 8)) \
+     __atomic_link_error ();
+# else
+#  define __atomic_check_size(mem) \
+   if (sizeof (*mem) != 4) \
+     __atomic_link_error ();
+# endif
+
+# define atomic_thread_fence_acquire() \
+  __atomic_thread_fence (__ATOMIC_ACQUIRE)
+# define atomic_thread_fence_release() \
+  __atomic_thread_fence (__ATOMIC_RELEASE)
+# define atomic_thread_fence_seq_cst() \
+  __atomic_thread_fence (__ATOMIC_SEQ_CST)
+
+# define atomic_load_relaxed(mem) \
+  ({ __atomic_check_size((mem)); __atomic_load_n ((mem), __ATOMIC_RELAXED); })
+# define atomic_load_acquire(mem) \
+  ({ __atomic_check_size((mem)); __atomic_load_n ((mem), __ATOMIC_ACQUIRE); })
+
+# define atomic_store_relaxed(mem, val) \
+  do { \
+    __atomic_check_size((mem)); \
+    __atomic_store_n ((mem), (val), __ATOMIC_RELAXED); \
+  } while (0)
+# define atomic_store_release(mem, val) \
+  do { \
+    __atomic_check_size((mem)); \
+    __atomic_store_n ((mem), (val), __ATOMIC_RELEASE); \
+  } while (0)
+
+/* On failure, this CAS has memory_order_relaxed semantics.  */
+# define atomic_compare_exchange_weak_relaxed(mem, expected, desired) \
+  ({ __atomic_check_size((mem)); \
+  __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
+    __ATOMIC_RELAXED, __ATOMIC_RELAXED); })
+# define atomic_compare_exchange_weak_acquire(mem, expected, desired) \
+  ({ __atomic_check_size((mem)); \
+  __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
+    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); })
+# define atomic_compare_exchange_weak_release(mem, expected, desired) \
+  ({ __atomic_check_size((mem)); \
+  __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
+    __ATOMIC_RELEASE, __ATOMIC_RELAXED); })
+
+# define atomic_exchange_acquire(mem, desired) \
+  ({ __atomic_check_size((mem)); \
+  __atomic_exchange_n ((mem), (desired), __ATOMIC_ACQUIRE); })
+# define atomic_exchange_release(mem, desired) \
+  ({ __atomic_check_size((mem)); \
+  __atomic_exchange_n ((mem), (desired), __ATOMIC_RELEASE); })
+
+# define atomic_fetch_add_relaxed(mem, operand) \
+  ({ __atomic_check_size((mem)); \
+  __atomic_fetch_add ((mem), (operand), __ATOMIC_RELAXED); })
+# define atomic_fetch_add_acquire(mem, operand) \
+  ({ __atomic_check_size((mem)); \
+  __atomic_fetch_add ((mem), (operand), __ATOMIC_ACQUIRE); })
+# define atomic_fetch_add_release(mem, operand) \
+  ({ __atomic_check_size((mem)); \
+  __atomic_fetch_add ((mem), (operand), __ATOMIC_RELEASE); })
+# define atomic_fetch_add_acq_rel(mem, operand) \
+  ({ __atomic_check_size((mem)); \
+  __atomic_fetch_add ((mem), (operand), __ATOMIC_ACQ_REL); })
+
+# define atomic_fetch_and_acquire(mem, operand) \
+  ({ __atomic_check_size((mem)); \
+  __atomic_fetch_and ((mem), (operand), __ATOMIC_ACQUIRE); })
+
+# define atomic_fetch_or_relaxed(mem, operand) \
+  ({ __atomic_check_size((mem)); \
+  __atomic_fetch_or ((mem), (operand), __ATOMIC_RELAXED); })
+# define atomic_fetch_or_acquire(mem, operand) \
+  ({ __atomic_check_size((mem)); \
+  __atomic_fetch_or ((mem), (operand), __ATOMIC_ACQUIRE); })
+
+#else /* !USE_ATOMIC_COMPILER_BUILTINS */
+
+/* By default, we assume that read, write, and full barriers are equivalent
+   to acquire, release, and seq_cst barriers.  Archs for which this does not
+   hold have to provide custom definitions of the fences.  */
+# ifndef atomic_thread_fence_acquire
+#  define atomic_thread_fence_acquire() atomic_read_barrier ()
+# endif
+# ifndef atomic_thread_fence_release
+#  define atomic_thread_fence_release() atomic_write_barrier ()
+# endif
+# ifndef atomic_thread_fence_seq_cst
+#  define atomic_thread_fence_seq_cst() atomic_full_barrier ()
+# endif
+
+# ifndef atomic_load_relaxed
+#  define atomic_load_relaxed(mem) \
+   ({ __typeof (*(mem)) __atg100_val; \
+   __asm__ ("" : "=r" (__atg100_val) : "0" (*(mem))); \
+   __atg100_val; })
+# endif
+# ifndef atomic_load_acquire
+#  define atomic_load_acquire(mem) \
+   ({ __typeof (*(mem)) __atg101_val = atomic_load_relaxed (mem); \
+   atomic_thread_fence_acquire (); \
+   __atg101_val; })
+# endif
+
+# ifndef atomic_store_relaxed
+/* XXX Use inline asm here?  */
+#  define atomic_store_relaxed(mem, val) do { *(mem) = (val); } while (0)
+# endif
+# ifndef atomic_store_release
+#  define atomic_store_release(mem, val) \
+   do { \
+     atomic_thread_fence_release (); \
+     atomic_store_relaxed ((mem), (val)); \
+   } while (0)
+# endif
+
+/* On failure, this CAS has memory_order_relaxed semantics.  */
+/* XXX This potentially has one branch more than necessary, but archs
+   currently do not define a CAS that returns both the previous value and
+   the success flag.  */
+# ifndef atomic_compare_exchange_weak_acquire
+#  define atomic_compare_exchange_weak_acquire(mem, expected, desired) \
+   ({ __typeof (*(expected)) __atg102_expected = *(expected); \
+   *(expected) = \
+     atomic_compare_and_exchange_val_acq ((mem), (desired), *(expected)); \
+   *(expected) == __atg102_expected; })
+# endif
+# ifndef atomic_compare_exchange_weak_relaxed
+/* XXX Fall back to CAS with acquire MO because archs do not define a weaker
+   CAS.  */
+#  define atomic_compare_exchange_weak_relaxed(mem, expected, desired) \
+   atomic_compare_exchange_weak_acquire ((mem), (expected), (desired))
+# endif
+# ifndef atomic_compare_exchange_weak_release
+#  define atomic_compare_exchange_weak_release(mem, expected, desired) \
+   ({ __typeof (*(expected)) __atg103_expected = *(expected); \
+   *(expected) = \
+     atomic_compare_and_exchange_val_rel ((mem), (desired), *(expected)); \
+   *(expected) == __atg103_expected; })
+# endif
+
+# ifndef atomic_exchange_acquire
+#  define atomic_exchange_acquire(mem, val) \
+   atomic_exchange_acq ((mem), (val))
+# endif
+# ifndef atomic_exchange_release
+#  define atomic_exchange_release(mem, val) \
+   atomic_exchange_rel ((mem), (val))
+# endif
+
+# ifndef atomic_fetch_add_acquire
+#  define atomic_fetch_add_acquire(mem, operand) \
+   atomic_exchange_and_add_acq ((mem), (operand))
+# endif
+# ifndef atomic_fetch_add_relaxed
+/* XXX Fall back to acquire MO because the MO semantics of
+   atomic_exchange_and_add are not documented; the generic version falls back
+   to atomic_exchange_and_add_acq if atomic_exchange_and_add is not defined,
+   and vice versa.  */
+#  define atomic_fetch_add_relaxed(mem, operand) \
+   atomic_fetch_add_acquire ((mem), (operand))
+# endif
+# ifndef atomic_fetch_add_release
+#  define atomic_fetch_add_release(mem, operand) \
+   atomic_exchange_and_add_rel ((mem), (operand))
+# endif
+# ifndef atomic_fetch_add_acq_rel
+#  define atomic_fetch_add_acq_rel(mem, operand) \
+   ({ atomic_thread_fence_release (); \
+   atomic_exchange_and_add_acq ((mem), (operand)); })
+# endif
+
+/* XXX The default for atomic_and_val has acquire semantics, but this is not
+   documented.  */
+# ifndef atomic_fetch_and_acquire
+#  define atomic_fetch_and_acquire(mem, operand) \
+   atomic_and_val ((mem), (operand))
+# endif
+
+/* XXX The default for atomic_or_val has acquire semantics, but this is not
+   documented.  */
+# ifndef atomic_fetch_or_acquire
+#  define atomic_fetch_or_acquire(mem, operand) \
+   atomic_or_val ((mem), (operand))
+# endif
+/* XXX Fall back to acquire MO because archs do not define a weaker
+   atomic_or_val.  */
+# ifndef atomic_fetch_or_relaxed
+#  define atomic_fetch_or_relaxed(mem, operand) \
+   atomic_fetch_or_acquire ((mem), (operand))
+# endif
+
+#endif /* !USE_ATOMIC_COMPILER_BUILTINS */
+
 #ifndef atomic_delay
 # define atomic_delay() do { /* nothing */ } while (0)
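
The fallback (!USE_ATOMIC_COMPILER_BUILTINS) path above builds a C11-style bool-returning weak CAS on top of the old-style value-returning CAS, which is where the "one branch more than necessary" XXX comment comes from. A minimal sketch of that pattern, with __sync_val_compare_and_swap standing in for an arch's atomic_compare_and_exchange_val_acq and the function name cas_weak_acquire being hypothetical:

/* Minimal sketch, not part of the patch: emulates the fallback definition of
   atomic_compare_exchange_weak_acquire.  __sync_val_compare_and_swap stands
   in for atomic_compare_and_exchange_val_acq; cas_weak_acquire is a
   hypothetical name.  Build with: gcc -std=gnu99 example.c */
#include <stdio.h>

static int
cas_weak_acquire (int *mem, int *expected, int desired)
{
  int old = *expected;
  /* The value-returning CAS hands back the previous *mem; the caller must
     then compare it against the expected value itself, which is the extra
     branch the patch's XXX comment refers to.  */
  *expected = __sync_val_compare_and_swap (mem, old, desired);
  return *expected == old;
}

int
main (void)
{
  int mem = 5, expected = 5;
  if (cas_weak_acquire (&mem, &expected, 7))
    printf ("swapped: mem=%d\n", mem);        /* mem is now 7 */
  else
    printf ("lost race: saw %d\n", expected); /* expected updated to *mem */
  return 0;
}

On success the caller's expected copy is left unchanged; on failure it is updated to the value observed in memory, matching the C11 atomic_compare_exchange_weak contract the macro emulates.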