Diffstat (limited to 'libc/sysdeps/linux/powerpc')
-rw-r--r-- libc/sysdeps/linux/powerpc/bits/atomic.h | 8 ++++----
-rw-r--r-- libc/sysdeps/linux/powerpc/fpu_control.h | 4 ++--
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/libc/sysdeps/linux/powerpc/bits/atomic.h b/libc/sysdeps/linux/powerpc/bits/atomic.h
index 447195538..977bda72f 100644
--- a/libc/sysdeps/linux/powerpc/bits/atomic.h
+++ b/libc/sysdeps/linux/powerpc/bits/atomic.h
@@ -235,7 +235,7 @@
/*
* All powerpc64 processors support the new "light weight" sync (lwsync).
*/
-# define atomic_read_barrier() __asm ("lwsync" ::: "memory")
+# define atomic_read_barrier() __asm__ ("lwsync" ::: "memory")
/*
* "light weight" sync can also be used for the release barrier.
*/
@@ -340,7 +340,7 @@
* sync (lwsync). So the only safe option is to use normal sync
* for all powerpc32 applications.
*/
-# define atomic_read_barrier() __asm ("sync" ::: "memory")
+# define atomic_read_barrier() __asm__ ("sync" ::: "memory")
#endif
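Editorial aside (not part of the patch): GCC accepts `asm`, `__asm`, and `__asm__`, but under strict conformance modes such as `-std=c99` the plain `asm` keyword is disabled, while the underscored alternate keywords always remain available; `__asm__` is the spelling GCC documents, so replacing `__asm` with it also keeps the tree consistent. A minimal sketch of the resulting barrier pattern, with hypothetical macro names and a PowerPC target assumed:

/* Hypothetical macro names; PowerPC target assumed.  An extended asm
   with no outputs is implicitly volatile, so these barriers cannot be
   optimized away; "memory" stops compiler reordering around them.  */
#define read_barrier_ppc64() __asm__ ("lwsync" ::: "memory") /* all ppc64 */
#define read_barrier_ppc32() __asm__ ("sync"   ::: "memory") /* any ppc32 */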
@@ -387,8 +387,8 @@ typedef uintmax_t uatomic_max_t;
# endif
#endif
-#define atomic_full_barrier() __asm ("sync" ::: "memory")
-#define atomic_write_barrier() __asm ("eieio" ::: "memory")
+#define atomic_full_barrier() __asm__ ("sync" ::: "memory")
+#define atomic_write_barrier() __asm__ ("eieio" ::: "memory")
#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
({ \
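The hunk above ends at the head of `__arch_compare_and_exchange_val_32_acq`, whose body is truncated here. For orientation only, a hedged standalone sketch of the classic lwarx/stwcx. loop that a 32-bit acquire compare-and-exchange on PowerPC typically wraps (hypothetical function name, not the file's exact body):

/* Hypothetical sketch: returns the value found at *mem; the exchange
   took place iff that value equals oldval.  */
static inline int
cas32_acq(volatile int *mem, int oldval, int newval)
{
    int tmp;
    __asm__ __volatile__ (
        "1: lwarx   %0,0,%1\n"   /* load word and reserve          */
        "   cmpw    %0,%2\n"     /* matches the expected value?    */
        "   bne     2f\n"        /* no: leave *mem alone           */
        "   stwcx.  %3,0,%1\n"   /* store newval if still reserved */
        "   bne-    1b\n"        /* reservation lost: retry        */
        "2: isync\n"             /* acquire barrier                */
        : "=&r" (tmp)
        : "b" (mem), "r" (oldval), "r" (newval)
        : "cr0", "memory");
    return tmp;
}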
diff --git a/libc/sysdeps/linux/powerpc/fpu_control.h b/libc/sysdeps/linux/powerpc/fpu_control.h
index 8a906ecaa..442da4721 100644
--- a/libc/sysdeps/linux/powerpc/fpu_control.h
+++ b/libc/sysdeps/linux/powerpc/fpu_control.h
@@ -57,11 +57,11 @@ typedef unsigned int fpu_control_t __attribute__ ((__mode__ (__SI__)));
/* Macros for accessing the hardware control word. */
#define _FPU_GETCW(__cw) ({ \
unsigned int env; \
- asm volatile ("mfspefscr %0" : "=r" (env)); \
+ __asm__ __volatile__ ("mfspefscr %0" : "=r" (env)); \
(__cw) = env; })
#define _FPU_SETCW(__cw) ({ \
unsigned int env = __cw; \
- asm volatile ("mtspefscr %0" : : "r" (env)); })
+ __asm__ __volatile__ ("mtspefscr %0" : : "r" (env)); })
#else
#define _FPU_RESERVED 0xffffff00 /* These bits are reserved and not changed. */
/* IEEE: same as above, but (some) exceptions;
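For context (not part of the patch): `_FPU_GETCW` and `_FPU_SETCW` are the standard fpu_control.h interface, so the fixed macros can be exercised with a round-trip read and write of the control word. A minimal sketch, assuming a powerpc build where fpu_control.h provides the macros as in the hunk above:

#include <stdio.h>
#include <fpu_control.h>

int main(void)
{
    fpu_control_t cw;

    _FPU_GETCW(cw);                        /* expands to the inline asm above */
    printf("control word: %#x\n", (unsigned int) cw);

    _FPU_SETCW(cw);                        /* write the same value back */
    return 0;
}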