author    Bernhard Reutner-Fischer <rep.dot.nop@gmail.com>  2015-03-26 23:12:38 +0100
committer Bernhard Reutner-Fischer <rep.dot.nop@gmail.com>  2015-03-27 00:08:46 +0100
commit    cc8d7d062e2db0a1ed3973e6af38cd1938ba74d7
tree      9e2db5741cbcb01a246821cc4fc9dc4df8e263b4
parent    560f416794403be952e850756d7e8b23e8fa92ac
include: update atomic.h from glibc
Signed-off-by: Bernhard Reutner-Fischer <rep.dot.nop@gmail.com>
-rw-r--r--  include/atomic.h             | 235
-rw-r--r--  test/.gitignore              |   2
-rw-r--r--  test/silly/Makefile.in       |   3
-rw-r--r--  test/silly/hello.c           |   2
-rw-r--r--  test/silly/tst-atomic-long.c |  27
-rw-r--r--  test/silly/tst-atomic.c      | 573
6 files changed, 837 insertions, 5 deletions
diff --git a/include/atomic.h b/include/atomic.h
index 307704cba..3680d8714 100644
--- a/include/atomic.h
+++ b/include/atomic.h
@@ -1,5 +1,5 @@
/* Internal macros for atomic operations for GNU C Library.
- Copyright (C) 2002-2006, 2009 Free Software Foundation, Inc.
+ Copyright (C) 2002-2015 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -32,7 +32,7 @@
the multi-thread case. The interfaces have the prefix
"catomic_".
- - support functions like barriers. They also have the preifx
+ - support functions like barriers. They also have the prefix
"atomic_".
Architectures must provide a few lowlevel macros (the compare
@@ -198,8 +198,12 @@
/* Add VALUE to *MEM and return the old value of *MEM. */
-#ifndef atomic_exchange_and_add
-# define atomic_exchange_and_add(mem, value) \
+#ifndef atomic_exchange_and_add_acq
+# ifdef atomic_exchange_and_add
+# define atomic_exchange_and_add_acq(mem, value) \
+ atomic_exchange_and_add (mem, value)
+# else
+# define atomic_exchange_and_add_acq(mem, value) \
({ __typeof (*(mem)) __atg6_oldval; \
__typeof (mem) __atg6_memp = (mem); \
__typeof (*(mem)) __atg6_value = (value); \
@@ -213,8 +217,18 @@
__atg6_oldval), 0)); \
\
__atg6_oldval; })
+# endif
#endif
+#ifndef atomic_exchange_and_add_rel
+# define atomic_exchange_and_add_rel(mem, value) \
+ atomic_exchange_and_add_acq(mem, value)
+#endif
+
+#ifndef atomic_exchange_and_add
+# define atomic_exchange_and_add(mem, value) \
+ atomic_exchange_and_add_acq(mem, value)
+#endif
#ifndef catomic_exchange_and_add
# define catomic_exchange_and_add(mem, value) \
@@ -528,6 +542,219 @@
({ __typeof (x) __x; __asm__ ("" : "=r" (__x) : "0" (x)); __x; })
#endif
+/* This is equal to 1 iff the architecture supports 64b atomic operations. */
+#define __HAVE_64B_ATOMICS 0 /* TODO: not yet used - Add these to arch bits! */
+#ifndef __HAVE_64B_ATOMICS
+#error Unable to determine if 64-bit atomics are present.
+#endif
+
+/* The following functions are a subset of the atomic operations provided by
+ C11. Usually, a function named atomic_OP_MO(args) is equivalent to C11's
+ atomic_OP_explicit(args, memory_order_MO); exceptions noted below. */
+
+/* Each arch can request to use compiler built-ins for C11 atomics. If it
+ does, all atomics will be based on these. */
+#if 0 /* not yet used USE_ATOMIC_COMPILER_BUILTINS */
+
+/* We require 32b atomic operations; some archs also support 64b atomic
+ operations. */
+void __atomic_link_error (void);
+# if __HAVE_64B_ATOMICS == 1
+# define __atomic_check_size(mem) \
+ if ((sizeof (*mem) != 4) && (sizeof (*mem) != 8)) \
+ __atomic_link_error ();
+# else
+# define __atomic_check_size(mem) \
+ if (sizeof (*mem) != 4) \
+ __atomic_link_error ();
+# endif
+
+# define atomic_thread_fence_acquire() \
+ __atomic_thread_fence (__ATOMIC_ACQUIRE)
+# define atomic_thread_fence_release() \
+ __atomic_thread_fence (__ATOMIC_RELEASE)
+# define atomic_thread_fence_seq_cst() \
+ __atomic_thread_fence (__ATOMIC_SEQ_CST)
+
+# define atomic_load_relaxed(mem) \
+ ({ __atomic_check_size((mem)); __atomic_load_n ((mem), __ATOMIC_RELAXED); })
+# define atomic_load_acquire(mem) \
+ ({ __atomic_check_size((mem)); __atomic_load_n ((mem), __ATOMIC_ACQUIRE); })
+
+# define atomic_store_relaxed(mem, val) \
+ do { \
+ __atomic_check_size((mem)); \
+ __atomic_store_n ((mem), (val), __ATOMIC_RELAXED); \
+ } while (0)
+# define atomic_store_release(mem, val) \
+ do { \
+ __atomic_check_size((mem)); \
+ __atomic_store_n ((mem), (val), __ATOMIC_RELEASE); \
+ } while (0)
+
+/* On failure, this CAS has memory_order_relaxed semantics. */
+# define atomic_compare_exchange_weak_relaxed(mem, expected, desired) \
+ ({ __atomic_check_size((mem)); \
+ __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED); })
+# define atomic_compare_exchange_weak_acquire(mem, expected, desired) \
+ ({ __atomic_check_size((mem)); \
+ __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
+ __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); })
+# define atomic_compare_exchange_weak_release(mem, expected, desired) \
+ ({ __atomic_check_size((mem)); \
+ __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
+ __ATOMIC_RELEASE, __ATOMIC_RELAXED); })
+
+# define atomic_exchange_acquire(mem, desired) \
+ ({ __atomic_check_size((mem)); \
+ __atomic_exchange_n ((mem), (desired), __ATOMIC_ACQUIRE); })
+# define atomic_exchange_release(mem, desired) \
+ ({ __atomic_check_size((mem)); \
+ __atomic_exchange_n ((mem), (desired), __ATOMIC_RELEASE); })
+
+# define atomic_fetch_add_relaxed(mem, operand) \
+ ({ __atomic_check_size((mem)); \
+ __atomic_fetch_add ((mem), (operand), __ATOMIC_RELAXED); })
+# define atomic_fetch_add_acquire(mem, operand) \
+ ({ __atomic_check_size((mem)); \
+ __atomic_fetch_add ((mem), (operand), __ATOMIC_ACQUIRE); })
+# define atomic_fetch_add_release(mem, operand) \
+ ({ __atomic_check_size((mem)); \
+ __atomic_fetch_add ((mem), (operand), __ATOMIC_RELEASE); })
+# define atomic_fetch_add_acq_rel(mem, operand) \
+ ({ __atomic_check_size((mem)); \
+ __atomic_fetch_add ((mem), (operand), __ATOMIC_ACQ_REL); })
+
+# define atomic_fetch_and_acquire(mem, operand) \
+ ({ __atomic_check_size((mem)); \
+ __atomic_fetch_and ((mem), (operand), __ATOMIC_ACQUIRE); })
+
+# define atomic_fetch_or_relaxed(mem, operand) \
+ ({ __atomic_check_size((mem)); \
+ __atomic_fetch_or ((mem), (operand), __ATOMIC_RELAXED); })
+# define atomic_fetch_or_acquire(mem, operand) \
+ ({ __atomic_check_size((mem)); \
+ __atomic_fetch_or ((mem), (operand), __ATOMIC_ACQUIRE); })
+
+#else /* !USE_ATOMIC_COMPILER_BUILTINS */
+
+/* By default, we assume that read, write, and full barriers are equivalent
+ to acquire, release, and seq_cst barriers. Archs for which this does not
+ hold have to provide custom definitions of the fences. */
+# ifndef atomic_thread_fence_acquire
+# define atomic_thread_fence_acquire() atomic_read_barrier ()
+# endif
+# ifndef atomic_thread_fence_release
+# define atomic_thread_fence_release() atomic_write_barrier ()
+# endif
+# ifndef atomic_thread_fence_seq_cst
+# define atomic_thread_fence_seq_cst() atomic_full_barrier ()
+# endif
+
+# ifndef atomic_load_relaxed
+# define atomic_load_relaxed(mem) \
+ ({ __typeof (*(mem)) __atg100_val; \
+ __asm__ ("" : "=r" (__atg100_val) : "0" (*(mem))); \
+ __atg100_val; })
+# endif
+# ifndef atomic_load_acquire
+# define atomic_load_acquire(mem) \
+ ({ __typeof (*(mem)) __atg101_val = atomic_load_relaxed (mem); \
+ atomic_thread_fence_acquire (); \
+ __atg101_val; })
+# endif
+
+# ifndef atomic_store_relaxed
+/* XXX Use inline asm here? */
+# define atomic_store_relaxed(mem, val) do { *(mem) = (val); } while (0)
+# endif
+# ifndef atomic_store_release
+# define atomic_store_release(mem, val) \
+ do { \
+ atomic_thread_fence_release (); \
+ atomic_store_relaxed ((mem), (val)); \
+ } while (0)
+# endif
+
+/* On failure, this CAS has memory_order_relaxed semantics. */
+/* XXX This potentially has one branch more than necessary, but archs
+ currently do not define a CAS that returns both the previous value and
+ the success flag. */
+# ifndef atomic_compare_exchange_weak_acquire
+# define atomic_compare_exchange_weak_acquire(mem, expected, desired) \
+ ({ __typeof (*(expected)) __atg102_expected = *(expected); \
+ *(expected) = \
+ atomic_compare_and_exchange_val_acq ((mem), (desired), *(expected)); \
+ *(expected) == __atg102_expected; })
+# endif
+# ifndef atomic_compare_exchange_weak_relaxed
+/* XXX Fall back to CAS with acquire MO because archs do not define a weaker
+ CAS. */
+# define atomic_compare_exchange_weak_relaxed(mem, expected, desired) \
+ atomic_compare_exchange_weak_acquire ((mem), (expected), (desired))
+# endif
+# ifndef atomic_compare_exchange_weak_release
+# define atomic_compare_exchange_weak_release(mem, expected, desired) \
+ ({ __typeof (*(expected)) __atg103_expected = *(expected); \
+ *(expected) = \
+ atomic_compare_and_exchange_val_rel ((mem), (desired), *(expected)); \
+ *(expected) == __atg103_expected; })
+# endif
+
+# ifndef atomic_exchange_acquire
+# define atomic_exchange_acquire(mem, val) \
+ atomic_exchange_acq ((mem), (val))
+# endif
+# ifndef atomic_exchange_release
+# define atomic_exchange_release(mem, val) \
+ atomic_exchange_rel ((mem), (val))
+# endif
+
+# ifndef atomic_fetch_add_acquire
+# define atomic_fetch_add_acquire(mem, operand) \
+ atomic_exchange_and_add_acq ((mem), (operand))
+# endif
+# ifndef atomic_fetch_add_relaxed
+/* XXX Fall back to acquire MO because the MO semantics of
+ atomic_exchange_and_add are not documented; the generic version falls back
+ to atomic_exchange_and_add_acq if atomic_exchange_and_add is not defined,
+ and vice versa. */
+# define atomic_fetch_add_relaxed(mem, operand) \
+ atomic_fetch_add_acquire ((mem), (operand))
+# endif
+# ifndef atomic_fetch_add_release
+# define atomic_fetch_add_release(mem, operand) \
+ atomic_exchange_and_add_rel ((mem), (operand))
+# endif
+# ifndef atomic_fetch_add_acq_rel
+# define atomic_fetch_add_acq_rel(mem, operand) \
+ ({ atomic_thread_fence_release (); \
+ atomic_exchange_and_add_acq ((mem), (operand)); })
+# endif
+
+/* XXX The default for atomic_and_val has acquire semantics, but this is not
+ documented. */
+# ifndef atomic_fetch_and_acquire
+# define atomic_fetch_and_acquire(mem, operand) \
+ atomic_and_val ((mem), (operand))
+# endif
+
+/* XXX The default for atomic_or_val has acquire semantics, but this is not
+ documented. */
+# ifndef atomic_fetch_or_acquire
+# define atomic_fetch_or_acquire(mem, operand) \
+ atomic_or_val ((mem), (operand))
+# endif
+/* XXX Fall back to acquire MO because archs do not define a weaker
+ atomic_or_val. */
+# ifndef atomic_fetch_or_relaxed
+# define atomic_fetch_or_relaxed(mem, operand) \
+ atomic_fetch_or_acquire ((mem), (operand))
+# endif
+
+#endif /* !USE_ATOMIC_COMPILER_BUILTINS */
+
#ifndef atomic_delay
# define atomic_delay() do { /* nothing */ } while (0)
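
For reference, the C11-style wrappers added above are meant to be used much like their <stdatomic.h> counterparts. Below is a minimal sketch (not part of the patch) of a release/acquire flag handoff built on the macros this update introduces; it assumes the internal <atomic.h> header is reachable from the translation unit:

    /* Illustrative only: release/acquire handoff using the new wrappers.  */
    #include <atomic.h>

    static int shared_data;
    static int data_ready;

    static void
    publish (int value)
    {
      atomic_store_relaxed (&shared_data, value); /* plain payload store */
      atomic_store_release (&data_ready, 1);      /* release: orders the store above */
    }

    static int
    wait_and_read (void)
    {
      while (atomic_load_acquire (&data_ready) == 0)
        atomic_delay ();                          /* spin; acquire pairs with the release */
      return atomic_load_relaxed (&shared_data);  /* payload is guaranteed visible here */
    }

With USE_ATOMIC_COMPILER_BUILTINS enabled these calls would map directly onto __atomic_load_n/__atomic_store_n with the matching __ATOMIC_* order; until then they fall back to the barrier-based definitions above.
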
diff --git a/test/.gitignore b/test/.gitignore
index beb50958d..0d19285e9 100644
--- a/test/.gitignore
+++ b/test/.gitignore
@@ -263,6 +263,8 @@ signal/tst-sigset
signal/tst-sigsimple
silly/hello
silly/tiny
+silly/tst-atomic
+silly/tst-atomic-long
stat/memcmp-stat
stat/stat
stat/stat64
diff --git a/test/silly/Makefile.in b/test/silly/Makefile.in
index bb08e0db9..9bb4032b5 100644
--- a/test/silly/Makefile.in
+++ b/test/silly/Makefile.in
@@ -3,3 +3,6 @@
RET_hello := 42
RET_tiny := 42
+
+# missing internal headers, disable these
+GLIBC_TESTS_DISABLED := tst-atomic_glibc tst-atomic-long_glibc
diff --git a/test/silly/hello.c b/test/silly/hello.c
index 4aba926e6..d330597ab 100644
--- a/test/silly/hello.c
+++ b/test/silly/hello.c
@@ -1,4 +1,4 @@
-#include<stdio.h>
+#include <stdio.h>
#include <stdlib.h>
int main(void)
diff --git a/test/silly/tst-atomic-long.c b/test/silly/tst-atomic-long.c
new file mode 100644
index 000000000..d13fb074c
--- /dev/null
+++ b/test/silly/tst-atomic-long.c
@@ -0,0 +1,27 @@
+/* Tests for atomic.h macros.
+ Copyright (C) 2003-2015 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <bits/wordsize.h>
+
+#define atomic_t long
+#if __WORDSIZE == 64
+# define TEST_ATOMIC64 1
+#endif
+
+#include "tst-atomic.c"
diff --git a/test/silly/tst-atomic.c b/test/silly/tst-atomic.c
new file mode 100644
index 000000000..fc773b231
--- /dev/null
+++ b/test/silly/tst-atomic.c
@@ -0,0 +1,573 @@
+/* Tests for atomic.h macros.
+ Copyright (C) 2003-2015 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <stdio.h>
+#include <atomic.h>
+
+#ifndef atomic_t
+# define atomic_t int
+#endif
+
+#define CHK2(f,a1,a2,rv,new_mem) \
+ retval = f(&mem, a1, a2); \
+ if (retval != rv) { \
+ printf("%s(mem, %lu, %lu): retval %lu != expected %lu\n", \
+ #f, a1, a2, retval, rv); \
+ ret = 1; \
+ } \
+ if (mem != new_mem) { \
+ printf("%s(mem, %lu, %lu): mem %lu != expected %lu\n", \
+ #f, a1, a2, mem, new_mem); \
+ ret = 1; \
+ }
+#define CHK1(f,a1,rv,new_mem) \
+ retval = f(&mem, a1); \
+ if (retval != rv) { \
+ printf("%s(mem, %lu): retval %lu != expected %lu\n", \
+ #f, a1, retval, rv); \
+ ret = 1; \
+ } \
+ if (mem != new_mem) { \
+ printf("%s(mem, %lu): mem %lu != expected %lu\n", \
+ #f, a1, mem, new_mem); \
+ ret = 1; \
+ }
+#define CHK0(f,rv,new_mem) \
+ retval = f(&mem); \
+ if (retval != rv) { \
+ printf("%s(mem): retval %lu != expected %lu\n", \
+ #f, retval, rv); \
+ ret = 1; \
+ } \
+ if (mem != new_mem) { \
+ printf("%s(mem): mem %lu != expected %lu\n", \
+ #f, mem, new_mem); \
+ ret = 1; \
+ }
+
+/* Test various atomic.h macros. */
+static int
+do_test (void)
+{
+ atomic_t mem, expected, retval;
+ int ret = 0;
+
+#ifdef atomic_compare_and_exchange_val_acq
+ mem = 24;
+ CHK2(atomic_compare_and_exchange_val_acq, 35, 24, 24, 35);
+ mem = 12;
+ CHK2(atomic_compare_and_exchange_val_acq, 10, 15, 12, 12);
+ mem = -15;
+ CHK2(atomic_compare_and_exchange_val_acq, -56, -15, -15, -56);
+ mem = -1;
+ CHK2(atomic_compare_and_exchange_val_acq, 17, 0, -1, -1);
+#endif
+
+ mem = 24;
+ CHK2(atomic_compare_and_exchange_bool_acq, 35, 24, 0, 35);
+ mem = 12;
+ CHK2(atomic_compare_and_exchange_bool_acq, 10, 15, 1, 12);
+ mem = -15;
+ CHK2(atomic_compare_and_exchange_bool_acq, -56, -15, 0, -56);
+ mem = -1;
+ CHK2(atomic_compare_and_exchange_bool_acq, 17, 0, 1, -1);
+
+ mem = 64;
+ CHK1(atomic_exchange_acq, 31, 64, 31);
+ mem = 2;
+ CHK1(atomic_exchange_and_add, 11, 2, 13);
+ mem = 2;
+ CHK1(atomic_exchange_and_add_acq, 11, 2, 13);
+ mem = 2;
+ CHK1(atomic_exchange_and_add_rel, 11, 2, 13);
+
+ mem = -21;
+ atomic_add (&mem, 22);
+ if (mem != 1)
+ {
+ printf ("atomic_add(mem, 22): mem %lu != expected 1\n", mem);
+ ret = 1;
+ }
+
+ mem = -1;
+ atomic_increment (&mem);
+ if (mem != 0)
+ {
+ printf ("atomic_increment(mem): mem %lu != expected 0\n", mem);
+ ret = 1;
+ }
+
+ mem = 2;
+ if ((retval = atomic_increment_val (&mem)) != 3)
+ {
+ printf ("atomic_increment_val(mem): retval %lu != expected 3\n", retval);
+ ret = 1;
+ }
+
+ mem = 0;
+ CHK0(atomic_increment_and_test, 0, 1);
+ mem = 35;
+ CHK0(atomic_increment_and_test, 0, 36);
+ mem = -1;
+ CHK0(atomic_increment_and_test, 1, 0);
+ mem = 17;
+ atomic_decrement (&mem);
+ if (mem != 16)
+ {
+ printf ("atomic_increment(mem): mem %lu != expected 16\n", mem);
+ ret = 1;
+ }
+
+ if ((retval = atomic_decrement_val (&mem)) != 15)
+ {
+ printf ("atomic_decrement_val(mem): retval %lu != expected 15\n", retval);
+ ret = 1;
+ }
+
+ mem = 0;
+ CHK0(atomic_decrement_and_test, 0, -1);
+
+ mem = 15;
+ CHK0(atomic_decrement_and_test, 0, 14);
+ mem = 1;
+ CHK0(atomic_decrement_and_test, 1, 0);
+ mem = 1;
+ if (atomic_decrement_if_positive (&mem) != 1
+ || mem != 0)
+ {
+ puts ("atomic_decrement_if_positive test 1 failed");
+ ret = 1;
+ }
+
+ mem = 0;
+ if (atomic_decrement_if_positive (&mem) != 0
+ || mem != 0)
+ {
+ puts ("atomic_decrement_if_positive test 2 failed");
+ ret = 1;
+ }
+
+ mem = -1;
+ if (atomic_decrement_if_positive (&mem) != -1
+ || mem != -1)
+ {
+ puts ("atomic_decrement_if_positive test 3 failed");
+ ret = 1;
+ }
+
+ mem = -12;
+ if (! atomic_add_negative (&mem, 10)
+ || mem != -2)
+ {
+ puts ("atomic_add_negative test 1 failed");
+ ret = 1;
+ }
+
+ mem = 0;
+ if (atomic_add_negative (&mem, 100)
+ || mem != 100)
+ {
+ puts ("atomic_add_negative test 2 failed");
+ ret = 1;
+ }
+
+ mem = 15;
+ if (atomic_add_negative (&mem, -10)
+ || mem != 5)
+ {
+ puts ("atomic_add_negative test 3 failed");
+ ret = 1;
+ }
+
+ mem = -12;
+ if (atomic_add_negative (&mem, 14)
+ || mem != 2)
+ {
+ puts ("atomic_add_negative test 4 failed");
+ ret = 1;
+ }
+
+ mem = 0;
+ if (! atomic_add_negative (&mem, -1)
+ || mem != -1)
+ {
+ puts ("atomic_add_negative test 5 failed");
+ ret = 1;
+ }
+
+ mem = -31;
+ if (atomic_add_negative (&mem, 31)
+ || mem != 0)
+ {
+ puts ("atomic_add_negative test 6 failed");
+ ret = 1;
+ }
+
+ mem = -34;
+ if (atomic_add_zero (&mem, 31)
+ || mem != -3)
+ {
+ puts ("atomic_add_zero test 1 failed");
+ ret = 1;
+ }
+
+ mem = -36;
+ if (! atomic_add_zero (&mem, 36)
+ || mem != 0)
+ {
+ puts ("atomic_add_zero test 2 failed");
+ ret = 1;
+ }
+
+ mem = 113;
+ if (atomic_add_zero (&mem, -13)
+ || mem != 100)
+ {
+ puts ("atomic_add_zero test 3 failed");
+ ret = 1;
+ }
+
+ mem = -18;
+ if (atomic_add_zero (&mem, 20)
+ || mem != 2)
+ {
+ puts ("atomic_add_zero test 4 failed");
+ ret = 1;
+ }
+
+ mem = 10;
+ if (atomic_add_zero (&mem, -20)
+ || mem != -10)
+ {
+ puts ("atomic_add_zero test 5 failed");
+ ret = 1;
+ }
+
+ mem = 10;
+ if (! atomic_add_zero (&mem, -10)
+ || mem != 0)
+ {
+ puts ("atomic_add_zero test 6 failed");
+ ret = 1;
+ }
+
+ mem = 0;
+ atomic_bit_set (&mem, 1);
+ if (mem != 2)
+ {
+ puts ("atomic_bit_set test 1 failed");
+ ret = 1;
+ }
+
+ mem = 8;
+ atomic_bit_set (&mem, 3);
+ if (mem != 8)
+ {
+ puts ("atomic_bit_set test 2 failed");
+ ret = 1;
+ }
+
+#ifdef TEST_ATOMIC64
+ mem = 16;
+ atomic_bit_set (&mem, 35);
+ if (mem != 0x800000010LL)
+ {
+ puts ("atomic_bit_set test 3 failed");
+ ret = 1;
+ }
+#endif
+
+ mem = 0;
+ if (atomic_bit_test_set (&mem, 1)
+ || mem != 2)
+ {
+ puts ("atomic_bit_test_set test 1 failed");
+ ret = 1;
+ }
+
+ mem = 8;
+ if (! atomic_bit_test_set (&mem, 3)
+ || mem != 8)
+ {
+ puts ("atomic_bit_test_set test 2 failed");
+ ret = 1;
+ }
+
+#ifdef TEST_ATOMIC64
+ mem = 16;
+ if (atomic_bit_test_set (&mem, 35)
+ || mem != 0x800000010LL)
+ {
+ puts ("atomic_bit_test_set test 3 failed");
+ ret = 1;
+ }
+
+ mem = 0x100000000LL;
+ if (! atomic_bit_test_set (&mem, 32)
+ || mem != 0x100000000LL)
+ {
+ puts ("atomic_bit_test_set test 4 failed");
+ ret = 1;
+ }
+#endif
+
+#ifdef catomic_compare_and_exchange_val_acq
+ mem = 24;
+ if (catomic_compare_and_exchange_val_acq (&mem, 35, 24) != 24
+ || mem != 35)
+ {
+ puts ("catomic_compare_and_exchange_val_acq test 1 failed");
+ ret = 1;
+ }
+
+ mem = 12;
+ if (catomic_compare_and_exchange_val_acq (&mem, 10, 15) != 12
+ || mem != 12)
+ {
+ puts ("catomic_compare_and_exchange_val_acq test 2 failed");
+ ret = 1;
+ }
+
+ mem = -15;
+ if (catomic_compare_and_exchange_val_acq (&mem, -56, -15) != -15
+ || mem != -56)
+ {
+ puts ("catomic_compare_and_exchange_val_acq test 3 failed");
+ ret = 1;
+ }
+
+ mem = -1;
+ if (catomic_compare_and_exchange_val_acq (&mem, 17, 0) != -1
+ || mem != -1)
+ {
+ puts ("catomic_compare_and_exchange_val_acq test 4 failed");
+ ret = 1;
+ }
+#endif
+
+ mem = 24;
+ if (catomic_compare_and_exchange_bool_acq (&mem, 35, 24)
+ || mem != 35)
+ {
+ puts ("catomic_compare_and_exchange_bool_acq test 1 failed");
+ ret = 1;
+ }
+
+ mem = 12;
+ if (! catomic_compare_and_exchange_bool_acq (&mem, 10, 15)
+ || mem != 12)
+ {
+ puts ("catomic_compare_and_exchange_bool_acq test 2 failed");
+ ret = 1;
+ }
+
+ mem = -15;
+ if (catomic_compare_and_exchange_bool_acq (&mem, -56, -15)
+ || mem != -56)
+ {
+ puts ("catomic_compare_and_exchange_bool_acq test 3 failed");
+ ret = 1;
+ }
+
+ mem = -1;
+ if (! catomic_compare_and_exchange_bool_acq (&mem, 17, 0)
+ || mem != -1)
+ {
+ puts ("catomic_compare_and_exchange_bool_acq test 4 failed");
+ ret = 1;
+ }
+
+ mem = 2;
+ if (catomic_exchange_and_add (&mem, 11) != 2
+ || mem != 13)
+ {
+ puts ("catomic_exchange_and_add test failed");
+ ret = 1;
+ }
+
+ mem = -21;
+ catomic_add (&mem, 22);
+ if (mem != 1)
+ {
+ puts ("catomic_add test failed");
+ ret = 1;
+ }
+
+ mem = -1;
+ catomic_increment (&mem);
+ if (mem != 0)
+ {
+ puts ("catomic_increment test failed");
+ ret = 1;
+ }
+
+ mem = 2;
+ if (catomic_increment_val (&mem) != 3)
+ {
+ puts ("catomic_increment_val test failed");
+ ret = 1;
+ }
+
+ mem = 17;
+ catomic_decrement (&mem);
+ if (mem != 16)
+ {
+ puts ("catomic_decrement test failed");
+ ret = 1;
+ }
+
+ if (catomic_decrement_val (&mem) != 15)
+ {
+ puts ("catomic_decrement_val test failed");
+ ret = 1;
+ }
+
+ /* Tests for C11-like atomics. */
+ mem = 11;
+ if (atomic_load_relaxed (&mem) != 11 || atomic_load_acquire (&mem) != 11)
+ {
+ puts ("atomic_load_{relaxed,acquire} test failed");
+ ret = 1;
+ }
+
+ atomic_store_relaxed (&mem, 12);
+ if (mem != 12)
+ {
+ puts ("atomic_store_relaxed test failed");
+ ret = 1;
+ }
+ atomic_store_release (&mem, 13);
+ if (mem != 13)
+ {
+ puts ("atomic_store_release test failed");
+ ret = 1;
+ }
+
+ mem = 14;
+ expected = 14;
+ if (!atomic_compare_exchange_weak_relaxed (&mem, &expected, 25)
+ || mem != 25 || expected != 14)
+ {
+ puts ("atomic_compare_exchange_weak_relaxed test 1 failed");
+ ret = 1;
+ }
+ if (atomic_compare_exchange_weak_relaxed (&mem, &expected, 14)
+ || mem != 25 || expected != 25)
+ {
+ puts ("atomic_compare_exchange_weak_relaxed test 2 failed");
+ ret = 1;
+ }
+ mem = 14;
+ expected = 14;
+ if (!atomic_compare_exchange_weak_acquire (&mem, &expected, 25)
+ || mem != 25 || expected != 14)
+ {
+ puts ("atomic_compare_exchange_weak_acquire test 1 failed");
+ ret = 1;
+ }
+ if (atomic_compare_exchange_weak_acquire (&mem, &expected, 14)
+ || mem != 25 || expected != 25)
+ {
+ puts ("atomic_compare_exchange_weak_acquire test 2 failed");
+ ret = 1;
+ }
+ mem = 14;
+ expected = 14;
+ if (!atomic_compare_exchange_weak_release (&mem, &expected, 25)
+ || mem != 25 || expected != 14)
+ {
+ puts ("atomic_compare_exchange_weak_release test 1 failed");
+ ret = 1;
+ }
+ if (atomic_compare_exchange_weak_release (&mem, &expected, 14)
+ || mem != 25 || expected != 25)
+ {
+ puts ("atomic_compare_exchange_weak_release test 2 failed");
+ ret = 1;
+ }
+
+ mem = 23;
+ if (atomic_exchange_acquire (&mem, 42) != 23 || mem != 42)
+ {
+ puts ("atomic_exchange_acquire test failed");
+ ret = 1;
+ }
+ mem = 23;
+ if (atomic_exchange_release (&mem, 42) != 23 || mem != 42)
+ {
+ puts ("atomic_exchange_release test failed");
+ ret = 1;
+ }
+
+ mem = 23;
+ if (atomic_fetch_add_relaxed (&mem, 1) != 23 || mem != 24)
+ {
+ puts ("atomic_fetch_add_relaxed test failed");
+ ret = 1;
+ }
+ mem = 23;
+ if (atomic_fetch_add_acquire (&mem, 1) != 23 || mem != 24)
+ {
+ puts ("atomic_fetch_add_acquire test failed");
+ ret = 1;
+ }
+ mem = 23;
+ if (atomic_fetch_add_release (&mem, 1) != 23 || mem != 24)
+ {
+ puts ("atomic_fetch_add_release test failed");
+ ret = 1;
+ }
+ mem = 23;
+ if (atomic_fetch_add_acq_rel (&mem, 1) != 23 || mem != 24)
+ {
+ puts ("atomic_fetch_add_acq_rel test failed");
+ ret = 1;
+ }
+
+ mem = 3;
+ if (atomic_fetch_and_acquire (&mem, 2) != 3 || mem != 2)
+ {
+ puts ("atomic_fetch_and_acquire test failed");
+ ret = 1;
+ }
+
+ mem = 4;
+ if (atomic_fetch_or_relaxed (&mem, 2) != 4 || mem != 6)
+ {
+ puts ("atomic_fetch_or_relaxed test failed");
+ ret = 1;
+ }
+ mem = 4;
+ if (atomic_fetch_or_acquire (&mem, 2) != 4 || mem != 6)
+ {
+ puts ("atomic_fetch_or_acquire test failed");
+ ret = 1;
+ }
+
+ /* This is a single-threaded test, so we can't test the effects of the
+ fences. */
+ atomic_thread_fence_acquire ();
+ atomic_thread_fence_release ();
+ atomic_thread_fence_seq_cst ();
+
+ return ret;
+}
+
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"