author     Bernhard Reutner-Fischer <rep.dot.nop@gmail.com>   2008-07-23 10:59:18 +0000
committer  Bernhard Reutner-Fischer <rep.dot.nop@gmail.com>   2008-07-23 10:59:18 +0000
commit     d35b0bc119816825a657f7c9c2a1f062e7048c39 (patch)
tree       92094ee13ffb04d81a4f34674bb09da5f4b5b8ec
parent     40ee4cb9496ed037957db9790e5bd87604d45fe5 (diff)
- fix asm and volatile keywords
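The plain `asm` keyword is a GNU extension that GCC disables under strict `-std=` modes (for example `-std=c99`), while the reserved-namespace alternate keywords `__asm__` and `__volatile__` are accepted at any conformance level, so headers that user code may include are expected to use the double-underscore spellings. That is presumably what this commit standardizes on throughout ldso, libc, and linuxthreads. A minimal sketch of the difference follows; it is illustrative only (the function name is made up, an x86 host is assumed) and is not taken from the patch:

```c
/* Illustrative only, not part of the patch.  Compiled with
 * `gcc -std=c99`, the plain-keyword form is rejected because `asm` is a
 * GNU extension disabled by -std=, while the __asm__/__volatile__
 * spelling is always accepted. */
static inline void cpu_relax(void)
{
#ifdef USE_PLAIN_KEYWORDS
    asm volatile ("rep; nop");          /* x86 "pause" hint; breaks under strict ISO modes */
#else
    __asm__ __volatile__ ("rep; nop");  /* same instruction; works at any -std= level */
#endif
}

int main(void)
{
    cpu_relax();
    return 0;
}
```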
| Mode       | File                                           | Lines changed |
|------------|------------------------------------------------|---------------|
| -rw-r--r-- | ldso/ldso/arm/dl-syscalls.h                    | 2             |
| -rw-r--r-- | ldso/ldso/cris/dl-startup.h                    | 4             |
| -rw-r--r-- | ldso/ldso/frv/dl-startup.h                     | 2             |
| -rw-r--r-- | ldso/ldso/m68k/dl-startup.h                    | 2             |
| -rw-r--r-- | ldso/ldso/mips/dl-startup.h                    | 2             |
| -rw-r--r-- | ldso/ldso/powerpc/dl-startup.h                 | 2             |
| -rw-r--r-- | ldso/ldso/sh/dl-startup.h                      | 2             |
| -rw-r--r-- | ldso/ldso/sh64/dl-startup.h                    | 2             |
| -rw-r--r-- | ldso/ldso/sparc/dl-startup.h                   | 2             |
| -rw-r--r-- | libc/sysdeps/linux/i386/bits/atomic.h          | 76            |
| -rw-r--r-- | libc/sysdeps/linux/i386/bits/mathinline.h      | 20            |
| -rw-r--r-- | libc/sysdeps/linux/i386/bits/sigcontextinfo.h  | 2             |
| -rw-r--r-- | libc/sysdeps/linux/ia64/bits/syscalls.h        | 2             |
| -rw-r--r-- | libc/sysdeps/linux/m68k/bits/mathinline.h      | 24            |
| -rw-r--r-- | libc/sysdeps/linux/powerpc/bits/atomic.h       | 44            |
| -rw-r--r-- | libc/sysdeps/linux/powerpc/bits/mathinline.h   | 4             |
| -rw-r--r-- | libc/sysdeps/linux/sh/bits/atomic.h            | 42            |
| -rw-r--r-- | libc/sysdeps/linux/x86_64/bits/atomic.h        | 98            |
| -rw-r--r-- | libpthread/linuxthreads.old/spinlock.c         | 4             |
19 files changed, 168 insertions, 168 deletions
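Most of the churn is in the `bits/atomic.h` headers, which wrap lock-prefixed x86 instructions in statement-expression macros dispatched on `sizeof (*mem)`. The sketch below shows that pattern with the post-patch spelling; it is a simplified stand-in, not the uClibc macro itself (the macro name and `LOCK_PREFIX` definition are placeholders, and the 64-bit case is omitted):

```c
/* Simplified sketch of the atomic-macro pattern touched by this patch,
 * using the corrected __asm__ __volatile__ spelling.  The name
 * my_atomic_increment and the LOCK_PREFIX value are placeholders; the
 * real headers choose the prefix based on SMP configuration. */
#include <stdio.h>
#include <stdlib.h>

#define LOCK_PREFIX "lock;"

#define my_atomic_increment(mem)                                          \
  (void) ({ if (sizeof (*(mem)) == 1)                                     \
              __asm__ __volatile__ (LOCK_PREFIX "incb %b0"                \
                                    : "=m" (*(mem)) : "m" (*(mem)));      \
            else if (sizeof (*(mem)) == 2)                                \
              __asm__ __volatile__ (LOCK_PREFIX "incw %w0"                \
                                    : "=m" (*(mem)) : "m" (*(mem)));      \
            else if (sizeof (*(mem)) == 4)                                \
              __asm__ __volatile__ (LOCK_PREFIX "incl %0"                 \
                                    : "=m" (*(mem)) : "m" (*(mem)));      \
            else                                                          \
              abort ();  /* 64-bit variant omitted in this sketch */      \
          })

int main(void)
{
    int counter = 0;
    my_atomic_increment(&counter);
    printf("%d\n", counter);   /* prints 1 */
    return 0;
}
```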
diff --git a/ldso/ldso/arm/dl-syscalls.h b/ldso/ldso/arm/dl-syscalls.h index ecbe96a1c..1c0e6699e 100644 --- a/ldso/ldso/arm/dl-syscalls.h +++ b/ldso/ldso/arm/dl-syscalls.h @@ -9,7 +9,7 @@ extern int _dl_errno; * provides them, so define them here (only required for thumb). */ #if defined(__thumb__) -asm( +__asm__( ".macro call_via register\n" " .global _call_via_\\register\n" " .hidden _call_via_\\register\n" diff --git a/ldso/ldso/cris/dl-startup.h b/ldso/ldso/cris/dl-startup.h index 417cb5096..832c3528b 100644 --- a/ldso/ldso/cris/dl-startup.h +++ b/ldso/ldso/cris/dl-startup.h @@ -6,7 +6,7 @@ * can find argc, argv and auxvt (Auxillary Vector Table). */ #ifdef __arch_v32 -asm("" \ +__asm__("" \ " .text\n" \ " .globl _start\n" \ " .type _start,@function\n" \ @@ -24,7 +24,7 @@ asm("" \ #else -asm("" \ +__asm__("" \ " .text\n" \ " .globl _start\n" \ " .type _start,@function\n" \ diff --git a/ldso/ldso/frv/dl-startup.h b/ldso/ldso/frv/dl-startup.h index 00c7592e1..674f81c15 100644 --- a/ldso/ldso/frv/dl-startup.h +++ b/ldso/ldso/frv/dl-startup.h @@ -25,7 +25,7 @@ __self_reloc returns the relocated pointer to us, so that we can use this value to initialize the PIC register. */ -asm("" \ +__asm__("" \ " .text\n" \ " .global _dl_boot\n" \ " .type _dl_boot,@function\n" \ diff --git a/ldso/ldso/m68k/dl-startup.h b/ldso/ldso/m68k/dl-startup.h index fca4b6ccb..2ed9ead50 100644 --- a/ldso/ldso/m68k/dl-startup.h +++ b/ldso/ldso/m68k/dl-startup.h @@ -4,7 +4,7 @@ * Copyright (C) 2005 by Erik Andersen <andersen@codepoet.org> */ -asm ("\ +__asm__ ("\ .text\n\ .globl _start\n\ .type _start,@function\n\ diff --git a/ldso/ldso/mips/dl-startup.h b/ldso/ldso/mips/dl-startup.h index 4e3fcafb8..d76f4874d 100644 --- a/ldso/ldso/mips/dl-startup.h +++ b/ldso/ldso/mips/dl-startup.h @@ -7,7 +7,7 @@ #include <sgidefs.h> -asm("" +__asm__("" " .text\n" " .globl _start\n" " .ent _start\n" diff --git a/ldso/ldso/powerpc/dl-startup.h b/ldso/ldso/powerpc/dl-startup.h index a5a8a83f2..7033da32b 100644 --- a/ldso/ldso/powerpc/dl-startup.h +++ b/ldso/ldso/powerpc/dl-startup.h @@ -4,7 +4,7 @@ * Copyright (C) 2005 by Joakim Tjernlund */ -asm( +__asm__( " .text\n" " .globl _start\n" " .type _start,@function\n" diff --git a/ldso/ldso/sh/dl-startup.h b/ldso/ldso/sh/dl-startup.h index bd51cebe0..3e59093fa 100644 --- a/ldso/ldso/sh/dl-startup.h +++ b/ldso/ldso/sh/dl-startup.h @@ -2,7 +2,7 @@ * will work as expected and cope with whatever platform specific wierdness is * needed for this architecture. */ -asm( +__asm__( " .text\n" " .globl _start\n" " .type _start,@function\n" diff --git a/ldso/ldso/sh64/dl-startup.h b/ldso/ldso/sh64/dl-startup.h index 7701afc71..8c0a58df8 100644 --- a/ldso/ldso/sh64/dl-startup.h +++ b/ldso/ldso/sh64/dl-startup.h @@ -3,7 +3,7 @@ * needed for this architecture. */ -asm("" \ +__asm__("" \ " .section .text..SHmedia32,\"ax\"\n" \ " .globl _start\n" \ " .type _start, @function\n" \ diff --git a/ldso/ldso/sparc/dl-startup.h b/ldso/ldso/sparc/dl-startup.h index 4ceb7c55a..c310e5df9 100644 --- a/ldso/ldso/sparc/dl-startup.h +++ b/ldso/ldso/sparc/dl-startup.h @@ -4,7 +4,7 @@ * can be done. 
*/ -asm ("\ +__asm__ ("\ .text\n\ .global _start\n\ .type _start,%function\n\ diff --git a/libc/sysdeps/linux/i386/bits/atomic.h b/libc/sysdeps/linux/i386/bits/atomic.h index 33279af5f..a20f424f8 100644 --- a/libc/sysdeps/linux/i386/bits/atomic.h +++ b/libc/sysdeps/linux/i386/bits/atomic.h @@ -60,21 +60,21 @@ typedef uintmax_t uatomic_max_t; #define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ ({ __typeof (*mem) ret; \ - __asm __volatile (LOCK_PREFIX "cmpxchgb %b2, %1" \ + __asm__ __volatile__ (LOCK_PREFIX "cmpxchgb %b2, %1" \ : "=a" (ret), "=m" (*mem) \ : "q" (newval), "m" (*mem), "0" (oldval)); \ ret; }) #define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ ({ __typeof (*mem) ret; \ - __asm __volatile (LOCK_PREFIX "cmpxchgw %w2, %1" \ + __asm__ __volatile__ (LOCK_PREFIX "cmpxchgw %w2, %1" \ : "=a" (ret), "=m" (*mem) \ : "r" (newval), "m" (*mem), "0" (oldval)); \ ret; }) #define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ ({ __typeof (*mem) ret; \ - __asm __volatile (LOCK_PREFIX "cmpxchgl %2, %1" \ + __asm__ __volatile__ (LOCK_PREFIX "cmpxchgl %2, %1" \ : "=a" (ret), "=m" (*mem) \ : "r" (newval), "m" (*mem), "0" (oldval)); \ ret; }) @@ -92,7 +92,7 @@ typedef uintmax_t uatomic_max_t; # ifdef __PIC__ # define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ ({ __typeof (*mem) ret; \ - __asm __volatile ("xchgl %2, %%ebx\n\t" \ + __asm__ __volatile__ ("xchgl %2, %%ebx\n\t" \ LOCK_PREFIX "cmpxchg8b %1\n\t" \ "xchgl %2, %%ebx" \ : "=A" (ret), "=m" (*mem) \ @@ -106,7 +106,7 @@ typedef uintmax_t uatomic_max_t; # else # define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ ({ __typeof (*mem) ret; \ - __asm __volatile (LOCK_PREFIX "cmpxchg8b %1" \ + __asm__ __volatile__ (LOCK_PREFIX "cmpxchg8b %1" \ : "=A" (ret), "=m" (*mem) \ : "b" (((unsigned long long int) (newval)) \ & 0xffffffff), \ @@ -123,15 +123,15 @@ typedef uintmax_t uatomic_max_t; #define atomic_exchange_acq(mem, newvalue) \ ({ __typeof (*mem) result; \ if (sizeof (*mem) == 1) \ - __asm __volatile ("xchgb %b0, %1" \ + __asm__ __volatile__ ("xchgb %b0, %1" \ : "=r" (result), "=m" (*mem) \ : "0" (newvalue), "m" (*mem)); \ else if (sizeof (*mem) == 2) \ - __asm __volatile ("xchgw %w0, %1" \ + __asm__ __volatile__ ("xchgw %w0, %1" \ : "=r" (result), "=m" (*mem) \ : "0" (newvalue), "m" (*mem)); \ else if (sizeof (*mem) == 4) \ - __asm __volatile ("xchgl %0, %1" \ + __asm__ __volatile__ ("xchgl %0, %1" \ : "=r" (result), "=m" (*mem) \ : "0" (newvalue), "m" (*mem)); \ else \ @@ -146,15 +146,15 @@ typedef uintmax_t uatomic_max_t; ({ __typeof (*mem) __result; \ __typeof (value) __addval = (value); \ if (sizeof (*mem) == 1) \ - __asm __volatile (LOCK_PREFIX "xaddb %b0, %1" \ + __asm__ __volatile__ (LOCK_PREFIX "xaddb %b0, %1" \ : "=r" (__result), "=m" (*mem) \ : "0" (__addval), "m" (*mem)); \ else if (sizeof (*mem) == 2) \ - __asm __volatile (LOCK_PREFIX "xaddw %w0, %1" \ + __asm__ __volatile__ (LOCK_PREFIX "xaddw %w0, %1" \ : "=r" (__result), "=m" (*mem) \ : "0" (__addval), "m" (*mem)); \ else if (sizeof (*mem) == 4) \ - __asm __volatile (LOCK_PREFIX "xaddl %0, %1" \ + __asm__ __volatile__ (LOCK_PREFIX "xaddl %0, %1" \ : "=r" (__result), "=m" (*mem) \ : "0" (__addval), "m" (*mem)); \ else \ @@ -176,15 +176,15 @@ typedef uintmax_t uatomic_max_t; else if (__builtin_constant_p (value) && (value) == -1) \ atomic_decrement (mem); \ else if (sizeof (*mem) == 1) \ - __asm __volatile (LOCK_PREFIX "addb %b1, %0" \ + __asm__ __volatile__ (LOCK_PREFIX "addb %b1, %0" \ : "=m" 
(*mem) \ : "ir" (value), "m" (*mem)); \ else if (sizeof (*mem) == 2) \ - __asm __volatile (LOCK_PREFIX "addw %w1, %0" \ + __asm__ __volatile__ (LOCK_PREFIX "addw %w1, %0" \ : "=m" (*mem) \ : "ir" (value), "m" (*mem)); \ else if (sizeof (*mem) == 4) \ - __asm __volatile (LOCK_PREFIX "addl %1, %0" \ + __asm__ __volatile__ (LOCK_PREFIX "addl %1, %0" \ : "=m" (*mem) \ : "ir" (value), "m" (*mem)); \ else \ @@ -204,15 +204,15 @@ typedef uintmax_t uatomic_max_t; #define atomic_add_negative(mem, value) \ ({ unsigned char __result; \ if (sizeof (*mem) == 1) \ - __asm __volatile (LOCK_PREFIX "addb %b2, %0; sets %1" \ + __asm__ __volatile__ (LOCK_PREFIX "addb %b2, %0; sets %1" \ : "=m" (*mem), "=qm" (__result) \ : "iq" (value), "m" (*mem)); \ else if (sizeof (*mem) == 2) \ - __asm __volatile (LOCK_PREFIX "addw %w2, %0; sets %1" \ + __asm__ __volatile__ (LOCK_PREFIX "addw %w2, %0; sets %1" \ : "=m" (*mem), "=qm" (__result) \ : "ir" (value), "m" (*mem)); \ else if (sizeof (*mem) == 4) \ - __asm __volatile (LOCK_PREFIX "addl %2, %0; sets %1" \ + __asm__ __volatile__ (LOCK_PREFIX "addl %2, %0; sets %1" \ : "=m" (*mem), "=qm" (__result) \ : "ir" (value), "m" (*mem)); \ else \ @@ -223,15 +223,15 @@ typedef uintmax_t uatomic_max_t; #define atomic_add_zero(mem, value) \ ({ unsigned char __result; \ if (sizeof (*mem) == 1) \ - __asm __volatile (LOCK_PREFIX "addb %b2, %0; setz %1" \ + __asm__ __volatile__ (LOCK_PREFIX "addb %b2, %0; setz %1" \ : "=m" (*mem), "=qm" (__result) \ : "ir" (value), "m" (*mem)); \ else if (sizeof (*mem) == 2) \ - __asm __volatile (LOCK_PREFIX "addw %w2, %0; setz %1" \ + __asm__ __volatile__ (LOCK_PREFIX "addw %w2, %0; setz %1" \ : "=m" (*mem), "=qm" (__result) \ : "ir" (value), "m" (*mem)); \ else if (sizeof (*mem) == 4) \ - __asm __volatile (LOCK_PREFIX "addl %2, %0; setz %1" \ + __asm__ __volatile__ (LOCK_PREFIX "addl %2, %0; setz %1" \ : "=m" (*mem), "=qm" (__result) \ : "ir" (value), "m" (*mem)); \ else \ @@ -241,15 +241,15 @@ typedef uintmax_t uatomic_max_t; #define atomic_increment(mem) \ (void) ({ if (sizeof (*mem) == 1) \ - __asm __volatile (LOCK_PREFIX "incb %b0" \ + __asm__ __volatile__ (LOCK_PREFIX "incb %b0" \ : "=m" (*mem) \ : "m" (*mem)); \ else if (sizeof (*mem) == 2) \ - __asm __volatile (LOCK_PREFIX "incw %w0" \ + __asm__ __volatile__ (LOCK_PREFIX "incw %w0" \ : "=m" (*mem) \ : "m" (*mem)); \ else if (sizeof (*mem) == 4) \ - __asm __volatile (LOCK_PREFIX "incl %0" \ + __asm__ __volatile__ (LOCK_PREFIX "incl %0" \ : "=m" (*mem) \ : "m" (*mem)); \ else \ @@ -268,15 +268,15 @@ typedef uintmax_t uatomic_max_t; #define atomic_increment_and_test(mem) \ ({ unsigned char __result; \ if (sizeof (*mem) == 1) \ - __asm __volatile (LOCK_PREFIX "incb %0; sete %b1" \ + __asm__ __volatile__ (LOCK_PREFIX "incb %0; sete %b1" \ : "=m" (*mem), "=qm" (__result) \ : "m" (*mem)); \ else if (sizeof (*mem) == 2) \ - __asm __volatile (LOCK_PREFIX "incw %0; sete %w1" \ + __asm__ __volatile__ (LOCK_PREFIX "incw %0; sete %w1" \ : "=m" (*mem), "=qm" (__result) \ : "m" (*mem)); \ else if (sizeof (*mem) == 4) \ - __asm __volatile (LOCK_PREFIX "incl %0; sete %1" \ + __asm__ __volatile__ (LOCK_PREFIX "incl %0; sete %1" \ : "=m" (*mem), "=qm" (__result) \ : "m" (*mem)); \ else \ @@ -286,15 +286,15 @@ typedef uintmax_t uatomic_max_t; #define atomic_decrement(mem) \ (void) ({ if (sizeof (*mem) == 1) \ - __asm __volatile (LOCK_PREFIX "decb %b0" \ + __asm__ __volatile__ (LOCK_PREFIX "decb %b0" \ : "=m" (*mem) \ : "m" (*mem)); \ else if (sizeof (*mem) == 2) \ - __asm __volatile (LOCK_PREFIX "decw %w0" \ + 
__asm__ __volatile__ (LOCK_PREFIX "decw %w0" \ : "=m" (*mem) \ : "m" (*mem)); \ else if (sizeof (*mem) == 4) \ - __asm __volatile (LOCK_PREFIX "decl %0" \ + __asm__ __volatile__ (LOCK_PREFIX "decl %0" \ : "=m" (*mem) \ : "m" (*mem)); \ else \ @@ -313,15 +313,15 @@ typedef uintmax_t uatomic_max_t; #define atomic_decrement_and_test(mem) \ ({ unsigned char __result; \ if (sizeof (*mem) == 1) \ - __asm __volatile (LOCK_PREFIX "decb %b0; sete %1" \ + __asm__ __volatile__ (LOCK_PREFIX "decb %b0; sete %1" \ : "=m" (*mem), "=qm" (__result) \ : "m" (*mem)); \ else if (sizeof (*mem) == 2) \ - __asm __volatile (LOCK_PREFIX "decw %w0; sete %1" \ + __asm__ __volatile__ (LOCK_PREFIX "decw %w0; sete %1" \ : "=m" (*mem), "=qm" (__result) \ : "m" (*mem)); \ else if (sizeof (*mem) == 4) \ - __asm __volatile (LOCK_PREFIX "decl %0; sete %1" \ + __asm__ __volatile__ (LOCK_PREFIX "decl %0; sete %1" \ : "=m" (*mem), "=qm" (__result) \ : "m" (*mem)); \ else \ @@ -331,15 +331,15 @@ typedef uintmax_t uatomic_max_t; #define atomic_bit_set(mem, bit) \ (void) ({ if (sizeof (*mem) == 1) \ - __asm __volatile (LOCK_PREFIX "orb %b2, %0" \ + __asm__ __volatile__ (LOCK_PREFIX "orb %b2, %0" \ : "=m" (*mem) \ : "m" (*mem), "ir" (1 << (bit))); \ else if (sizeof (*mem) == 2) \ - __asm __volatile (LOCK_PREFIX "orw %w2, %0" \ + __asm__ __volatile__ (LOCK_PREFIX "orw %w2, %0" \ : "=m" (*mem) \ : "m" (*mem), "ir" (1 << (bit))); \ else if (sizeof (*mem) == 4) \ - __asm __volatile (LOCK_PREFIX "orl %2, %0" \ + __asm__ __volatile__ (LOCK_PREFIX "orl %2, %0" \ : "=m" (*mem) \ : "m" (*mem), "ir" (1 << (bit))); \ else \ @@ -350,15 +350,15 @@ typedef uintmax_t uatomic_max_t; #define atomic_bit_test_set(mem, bit) \ ({ unsigned char __result; \ if (sizeof (*mem) == 1) \ - __asm __volatile (LOCK_PREFIX "btsb %3, %1; setc %0" \ + __asm__ __volatile__ (LOCK_PREFIX "btsb %3, %1; setc %0" \ : "=q" (__result), "=m" (*mem) \ : "m" (*mem), "ir" (bit)); \ else if (sizeof (*mem) == 2) \ - __asm __volatile (LOCK_PREFIX "btsw %3, %1; setc %0" \ + __asm__ __volatile__ (LOCK_PREFIX "btsw %3, %1; setc %0" \ : "=q" (__result), "=m" (*mem) \ : "m" (*mem), "ir" (bit)); \ else if (sizeof (*mem) == 4) \ - __asm __volatile (LOCK_PREFIX "btsl %3, %1; setc %0" \ + __asm__ __volatile__ (LOCK_PREFIX "btsl %3, %1; setc %0" \ : "=q" (__result), "=m" (*mem) \ : "m" (*mem), "ir" (bit)); \ else \ diff --git a/libc/sysdeps/linux/i386/bits/mathinline.h b/libc/sysdeps/linux/i386/bits/mathinline.h index e04ce9509..879b76b40 100644 --- a/libc/sysdeps/linux/i386/bits/mathinline.h +++ b/libc/sysdeps/linux/i386/bits/mathinline.h @@ -206,7 +206,7 @@ __NTH (__signbitl (long double __x)) __MATH_INLINE float_type __NTH (func (float_type __x)) \ { \ register float_type __result; \ - __asm __volatile__ (op : "=t" (__result) : params); \ + __asm__ __volatile__ (op : "=t" (__result) : params); \ return __result; \ } @@ -531,22 +531,22 @@ __inline_mathcodeNP (floor, __x, \ register long double __value; \ __volatile unsigned short int __cw; \ __volatile unsigned short int __cwtmp; \ - __asm __volatile ("fnstcw %0" : "=m" (__cw)); \ + __asm__ __volatile__ ("fnstcw %0" : "=m" (__cw)); \ __cwtmp = (__cw & 0xf3ff) | 0x0400; /* rounding down */ \ - __asm __volatile ("fldcw %0" : : "m" (__cwtmp)); \ - __asm __volatile ("frndint" : "=t" (__value) : "0" (__x)); \ - __asm __volatile ("fldcw %0" : : "m" (__cw)); \ + __asm__ __volatile__ ("fldcw %0" : : "m" (__cwtmp)); \ + __asm__ __volatile__ ("frndint" : "=t" (__value) : "0" (__x)); \ + __asm__ __volatile__ ("fldcw %0" : : "m" (__cw)); \ return 
__value) __inline_mathcodeNP (ceil, __x, \ register long double __value; \ __volatile unsigned short int __cw; \ __volatile unsigned short int __cwtmp; \ - __asm __volatile ("fnstcw %0" : "=m" (__cw)); \ + __asm__ __volatile__ ("fnstcw %0" : "=m" (__cw)); \ __cwtmp = (__cw & 0xf3ff) | 0x0800; /* rounding up */ \ - __asm __volatile ("fldcw %0" : : "m" (__cwtmp)); \ - __asm __volatile ("frndint" : "=t" (__value) : "0" (__x)); \ - __asm __volatile ("fldcw %0" : : "m" (__cw)); \ + __asm__ __volatile__ ("fldcw %0" : : "m" (__cwtmp)); \ + __asm__ __volatile__ ("frndint" : "=t" (__value) : "0" (__x)); \ + __asm__ __volatile__ ("fldcw %0" : : "m" (__cw)); \ return __value) #ifdef __FAST_MATH__ @@ -746,7 +746,7 @@ __inline_mathcode (__acosh1p, __x, \ __inline_mathop (__ieee754_sqrt, "fsqrt") __inline_mathcode2 (__ieee754_atan2, __y, __x, register long double __value; - __asm __volatile__ ("fpatan\n\t" + __asm__ __volatile__ ("fpatan\n\t" : "=t" (__value) : "0" (__x), "u" (__y) : "st(1)"); return __value;) diff --git a/libc/sysdeps/linux/i386/bits/sigcontextinfo.h b/libc/sysdeps/linux/i386/bits/sigcontextinfo.h index 6530ba6f3..b7367bac6 100644 --- a/libc/sysdeps/linux/i386/bits/sigcontextinfo.h +++ b/libc/sysdeps/linux/i386/bits/sigcontextinfo.h @@ -25,7 +25,7 @@ #define CALL_SIGHANDLER(handler, signo, ctx) \ do { \ int __tmp1, __tmp2, __tmp3, __tmp4; \ - __asm __volatile ("movl\t%%esp, %%edi\n\t" \ + __asm__ __volatile__ ("movl\t%%esp, %%edi\n\t" \ "andl\t$-16, %%esp\n\t" \ "subl\t%8, %%esp\n\t" \ "movl\t%%edi, %c8-4(%%esp)\n\t" \ diff --git a/libc/sysdeps/linux/ia64/bits/syscalls.h b/libc/sysdeps/linux/ia64/bits/syscalls.h index 4e8a305ea..0c3d6ca3a 100644 --- a/libc/sysdeps/linux/ia64/bits/syscalls.h +++ b/libc/sysdeps/linux/ia64/bits/syscalls.h @@ -45,7 +45,7 @@ register long _r15 __asm__ ("r15") = SYS_ify(name); \ long _retval; \ LOAD_REGS_##nr \ - __asm __volatile ("break " ___IA64_BREAK_SYSCALL ";;\n\t" \ + __asm__ __volatile__ ("break " ___IA64_BREAK_SYSCALL ";;\n\t" \ : "=r" (_r8), "=r" (_r10), "=r" (_r15) ASM_OUTARGS_##nr \ : "2" (_r15) ASM_ARGS_##nr \ : "memory" ASM_CLOBBERS_##nr); \ diff --git a/libc/sysdeps/linux/m68k/bits/mathinline.h b/libc/sysdeps/linux/m68k/bits/mathinline.h index 8cc21694b..f3166000d 100644 --- a/libc/sysdeps/linux/m68k/bits/mathinline.h +++ b/libc/sysdeps/linux/m68k/bits/mathinline.h @@ -179,14 +179,14 @@ __m81_defun (float_type, __CONCAT(__floor,s), (float_type __x)) \ { \ float_type __result; \ unsigned long int __ctrl_reg; \ - __asm __volatile__ ("fmove%.l %!, %0" : "=dm" (__ctrl_reg)); \ + __asm__ __volatile__ ("fmove%.l %!, %0" : "=dm" (__ctrl_reg)); \ /* Set rounding towards negative infinity. */ \ - __asm __volatile__ ("fmove%.l %0, %!" : /* No outputs. */ \ + __asm__ __volatile__ ("fmove%.l %0, %!" : /* No outputs. */ \ : "dmi" ((__ctrl_reg & ~0x10) | 0x20)); \ /* Convert X to an integer, using -Inf rounding. */ \ - __asm __volatile__ ("fint%.x %1, %0" : "=f" (__result) : "f" (__x)); \ + __asm__ __volatile__ ("fint%.x %1, %0" : "=f" (__result) : "f" (__x)); \ /* Restore the previous rounding mode. */ \ - __asm __volatile__ ("fmove%.l %0, %!" : /* No outputs. */ \ + __asm__ __volatile__ ("fmove%.l %0, %!" : /* No outputs. 
*/ \ : "dmi" (__ctrl_reg)); \ return __result; \ } \ @@ -195,14 +195,14 @@ __m81_defun (float_type, __CONCAT(__ceil,s), (float_type __x)) \ { \ float_type __result; \ unsigned long int __ctrl_reg; \ - __asm __volatile__ ("fmove%.l %!, %0" : "=dm" (__ctrl_reg)); \ + __asm__ __volatile__ ("fmove%.l %!, %0" : "=dm" (__ctrl_reg)); \ /* Set rounding towards positive infinity. */ \ - __asm __volatile__ ("fmove%.l %0, %!" : /* No outputs. */ \ + __asm__ __volatile__ ("fmove%.l %0, %!" : /* No outputs. */ \ : "dmi" (__ctrl_reg | 0x30)); \ /* Convert X to an integer, using +Inf rounding. */ \ - __asm __volatile__ ("fint%.x %1, %0" : "=f" (__result) : "f" (__x)); \ + __asm__ __volatile__ ("fint%.x %1, %0" : "=f" (__result) : "f" (__x)); \ /* Restore the previous rounding mode. */ \ - __asm __volatile__ ("fmove%.l %0, %!" : /* No outputs. */ \ + __asm__ __volatile__ ("fmove%.l %0, %!" : /* No outputs. */ \ : "dmi" (__ctrl_reg)); \ return __result; \ } @@ -295,12 +295,12 @@ __m81_defun (float_type, __CONCAT(__nearbyint,s), (float_type __x)) \ { \ float_type __result; \ unsigned long int __ctrl_reg; \ - __asm __volatile__ ("fmove%.l %!, %0" : "=dm" (__ctrl_reg)); \ + __asm__ __volatile__ ("fmove%.l %!, %0" : "=dm" (__ctrl_reg)); \ /* Temporarily disable the inexact exception. */ \ - __asm __volatile__ ("fmove%.l %0, %!" : /* No outputs. */ \ + __asm__ __volatile__ ("fmove%.l %0, %!" : /* No outputs. */ \ : "dmi" (__ctrl_reg & ~0x200)); \ - __asm __volatile__ ("fint%.x %1, %0" : "=f" (__result) : "f" (__x)); \ - __asm __volatile__ ("fmove%.l %0, %!" : /* No outputs. */ \ + __asm__ __volatile__ ("fint%.x %1, %0" : "=f" (__result) : "f" (__x)); \ + __asm__ __volatile__ ("fmove%.l %0, %!" : /* No outputs. */ \ : "dmi" (__ctrl_reg)); \ return __result; \ } \ diff --git a/libc/sysdeps/linux/powerpc/bits/atomic.h b/libc/sysdeps/linux/powerpc/bits/atomic.h index 977bda72f..d8a4ed33e 100644 --- a/libc/sysdeps/linux/powerpc/bits/atomic.h +++ b/libc/sysdeps/linux/powerpc/bits/atomic.h @@ -50,7 +50,7 @@ # define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \ ({ \ unsigned int __tmp, __tmp2; \ - __asm __volatile (" clrldi %1,%1,32\n" \ + __asm__ __volatile__ (" clrldi %1,%1,32\n" \ "1: lwarx %0,0,%2\n" \ " subf. %0,%1,%0\n" \ " bne 2f\n" \ @@ -66,7 +66,7 @@ # define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \ ({ \ unsigned int __tmp, __tmp2; \ - __asm __volatile (__ARCH_REL_INSTR "\n" \ + __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \ " clrldi %1,%1,32\n" \ "1: lwarx %0,0,%2\n" \ " subf. %0,%1,%0\n" \ @@ -88,7 +88,7 @@ # define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \ ({ \ unsigned long __tmp; \ - __asm __volatile ( \ + __asm__ __volatile__ ( \ "1: ldarx %0,0,%1\n" \ " subf. %0,%2,%0\n" \ " bne 2f\n" \ @@ -104,7 +104,7 @@ # define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \ ({ \ unsigned long __tmp; \ - __asm __volatile (__ARCH_REL_INSTR "\n" \ + __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \ "1: ldarx %0,0,%1\n" \ " subf. 
%0,%2,%0\n" \ " bne 2f\n" \ @@ -121,7 +121,7 @@ ({ \ __typeof (*(mem)) __tmp; \ __typeof (mem) __memp = (mem); \ - __asm __volatile ( \ + __asm__ __volatile__ ( \ "1: ldarx %0,0,%1\n" \ " cmpd %0,%2\n" \ " bne 2f\n" \ @@ -138,7 +138,7 @@ ({ \ __typeof (*(mem)) __tmp; \ __typeof (mem) __memp = (mem); \ - __asm __volatile (__ARCH_REL_INSTR "\n" \ + __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \ "1: ldarx %0,0,%1\n" \ " cmpd %0,%2\n" \ " bne 2f\n" \ @@ -154,7 +154,7 @@ # define __arch_atomic_exchange_64_acq(mem, value) \ ({ \ __typeof (*mem) __val; \ - __asm __volatile (__ARCH_REL_INSTR "\n" \ + __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \ "1: ldarx %0,0,%2\n" \ " stdcx. %3,0,%2\n" \ " bne- 1b\n" \ @@ -168,7 +168,7 @@ # define __arch_atomic_exchange_64_rel(mem, value) \ ({ \ __typeof (*mem) __val; \ - __asm __volatile (__ARCH_REL_INSTR "\n" \ + __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \ "1: ldarx %0,0,%2\n" \ " stdcx. %3,0,%2\n" \ " bne- 1b" \ @@ -181,7 +181,7 @@ # define __arch_atomic_exchange_and_add_64(mem, value) \ ({ \ __typeof (*mem) __val, __tmp; \ - __asm __volatile ("1: ldarx %0,0,%3\n" \ + __asm__ __volatile__ ("1: ldarx %0,0,%3\n" \ " add %1,%0,%4\n" \ " stdcx. %1,0,%3\n" \ " bne- 1b" \ @@ -194,7 +194,7 @@ # define __arch_atomic_increment_val_64(mem) \ ({ \ __typeof (*(mem)) __val; \ - __asm __volatile ("1: ldarx %0,0,%2\n" \ + __asm__ __volatile__ ("1: ldarx %0,0,%2\n" \ " addi %0,%0,1\n" \ " stdcx. %0,0,%2\n" \ " bne- 1b" \ @@ -207,7 +207,7 @@ # define __arch_atomic_decrement_val_64(mem) \ ({ \ __typeof (*(mem)) __val; \ - __asm __volatile ("1: ldarx %0,0,%2\n" \ + __asm__ __volatile__ ("1: ldarx %0,0,%2\n" \ " subi %0,%0,1\n" \ " stdcx. %0,0,%2\n" \ " bne- 1b" \ @@ -219,7 +219,7 @@ # define __arch_atomic_decrement_if_positive_64(mem) \ ({ int __val, __tmp; \ - __asm __volatile ("1: ldarx %0,0,%3\n" \ + __asm__ __volatile__ ("1: ldarx %0,0,%3\n" \ " cmpdi 0,%0,0\n" \ " addi %1,%0,-1\n" \ " ble 2f\n" \ @@ -273,7 +273,7 @@ # define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \ ({ \ unsigned int __tmp; \ - __asm __volatile ( \ + __asm__ __volatile__ ( \ "1: lwarx %0,0,%1\n" \ " subf. %0,%2,%0\n" \ " bne 2f\n" \ @@ -289,7 +289,7 @@ # define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \ ({ \ unsigned int __tmp; \ - __asm __volatile (__ARCH_REL_INSTR "\n" \ + __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \ "1: lwarx %0,0,%1\n" \ " subf. %0,%2,%0\n" \ " bne 2f\n" \ @@ -394,7 +394,7 @@ typedef uintmax_t uatomic_max_t; ({ \ __typeof (*(mem)) __tmp; \ __typeof (mem) __memp = (mem); \ - __asm __volatile ( \ + __asm__ __volatile__ ( \ "1: lwarx %0,0,%1\n" \ " cmpw %0,%2\n" \ " bne 2f\n" \ @@ -411,7 +411,7 @@ typedef uintmax_t uatomic_max_t; ({ \ __typeof (*(mem)) __tmp; \ __typeof (mem) __memp = (mem); \ - __asm __volatile (__ARCH_REL_INSTR "\n" \ + __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \ "1: lwarx %0,0,%1\n" \ " cmpw %0,%2\n" \ " bne 2f\n" \ @@ -427,7 +427,7 @@ typedef uintmax_t uatomic_max_t; #define __arch_atomic_exchange_32_acq(mem, value) \ ({ \ __typeof (*mem) __val; \ - __asm __volatile ( \ + __asm__ __volatile__ ( \ "1: lwarx %0,0,%2\n" \ " stwcx. %3,0,%2\n" \ " bne- 1b\n" \ @@ -441,7 +441,7 @@ typedef uintmax_t uatomic_max_t; #define __arch_atomic_exchange_32_rel(mem, value) \ ({ \ __typeof (*mem) __val; \ - __asm __volatile (__ARCH_REL_INSTR "\n" \ + __asm__ __volatile__ (__ARCH_REL_INSTR "\n" \ "1: lwarx %0,0,%2\n" \ " stwcx. 
%3,0,%2\n" \ " bne- 1b" \ @@ -454,7 +454,7 @@ typedef uintmax_t uatomic_max_t; #define __arch_atomic_exchange_and_add_32(mem, value) \ ({ \ __typeof (*mem) __val, __tmp; \ - __asm __volatile ("1: lwarx %0,0,%3\n" \ + __asm__ __volatile__ ("1: lwarx %0,0,%3\n" \ " add %1,%0,%4\n" \ " stwcx. %1,0,%3\n" \ " bne- 1b" \ @@ -467,7 +467,7 @@ typedef uintmax_t uatomic_max_t; #define __arch_atomic_increment_val_32(mem) \ ({ \ __typeof (*(mem)) __val; \ - __asm __volatile ("1: lwarx %0,0,%2\n" \ + __asm__ __volatile__ ("1: lwarx %0,0,%2\n" \ " addi %0,%0,1\n" \ " stwcx. %0,0,%2\n" \ " bne- 1b" \ @@ -480,7 +480,7 @@ typedef uintmax_t uatomic_max_t; #define __arch_atomic_decrement_val_32(mem) \ ({ \ __typeof (*(mem)) __val; \ - __asm __volatile ("1: lwarx %0,0,%2\n" \ + __asm__ __volatile__ ("1: lwarx %0,0,%2\n" \ " subi %0,%0,1\n" \ " stwcx. %0,0,%2\n" \ " bne- 1b" \ @@ -492,7 +492,7 @@ typedef uintmax_t uatomic_max_t; #define __arch_atomic_decrement_if_positive_32(mem) \ ({ int __val, __tmp; \ - __asm __volatile ("1: lwarx %0,0,%3\n" \ + __asm__ __volatile__ ("1: lwarx %0,0,%3\n" \ " cmpwi 0,%0,0\n" \ " addi %1,%0,-1\n" \ " ble 2f\n" \ diff --git a/libc/sysdeps/linux/powerpc/bits/mathinline.h b/libc/sysdeps/linux/powerpc/bits/mathinline.h index e2536a3cc..d1b05f388 100644 --- a/libc/sysdeps/linux/powerpc/bits/mathinline.h +++ b/libc/sysdeps/linux/powerpc/bits/mathinline.h @@ -148,7 +148,7 @@ __NTH (__ieee754_sqrt (double __x)) { /* Volatile is required to prevent the compiler from moving the fsqrt instruction above the branch. */ - __asm __volatile ( + __asm__ __volatile__ ( " fsqrt %0,%1\n" : "=f" (__z) : "f" (__x)); @@ -170,7 +170,7 @@ __NTH (__ieee754_sqrtf (float __x)) { /* Volatile is required to prevent the compiler from moving the fsqrts instruction above the branch. 
*/ - __asm __volatile ( + __asm__ __volatile__ ( " fsqrts %0,%1\n" : "=f" (__z) : "f" (__x)); diff --git a/libc/sysdeps/linux/sh/bits/atomic.h b/libc/sysdeps/linux/sh/bits/atomic.h index a0e5918f0..6bb7255c5 100644 --- a/libc/sysdeps/linux/sh/bits/atomic.h +++ b/libc/sysdeps/linux/sh/bits/atomic.h @@ -67,7 +67,7 @@ typedef uintmax_t uatomic_max_t; #define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ ({ __typeof (*(mem)) __result; \ - __asm __volatile ("\ + __asm__ __volatile__ ("\ .align 2\n\ mova 1f,r0\n\ nop\n\ @@ -84,7 +84,7 @@ typedef uintmax_t uatomic_max_t; #define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ ({ __typeof (*(mem)) __result; \ - __asm __volatile ("\ + __asm__ __volatile__ ("\ .align 2\n\ mova 1f,r0\n\ nop\n\ @@ -101,7 +101,7 @@ typedef uintmax_t uatomic_max_t; #define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ ({ __typeof (*(mem)) __result; \ - __asm __volatile ("\ + __asm__ __volatile__ ("\ .align 2\n\ mova 1f,r0\n\ nop\n\ @@ -127,7 +127,7 @@ typedef uintmax_t uatomic_max_t; #define atomic_exchange_and_add(mem, value) \ ({ __typeof (*(mem)) __result, __tmp, __value = (value); \ if (sizeof (*(mem)) == 1) \ - __asm __volatile ("\ + __asm__ __volatile__ ("\ .align 2\n\ mova 1f,r0\n\ mov r15,r1\n\ @@ -139,7 +139,7 @@ typedef uintmax_t uatomic_max_t; : "=&r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \ : "r0", "r1", "memory"); \ else if (sizeof (*(mem)) == 2) \ - __asm __volatile ("\ + __asm__ __volatile__ ("\ .align 2\n\ mova 1f,r0\n\ mov r15,r1\n\ @@ -151,7 +151,7 @@ typedef uintmax_t uatomic_max_t; : "=&r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \ : "r0", "r1", "memory"); \ else if (sizeof (*(mem)) == 4) \ - __asm __volatile ("\ + __asm__ __volatile__ ("\ .align 2\n\ mova 1f,r0\n\ mov r15,r1\n\ @@ -176,7 +176,7 @@ typedef uintmax_t uatomic_max_t; #define atomic_add(mem, value) \ (void) ({ __typeof (*(mem)) __tmp, __value = (value); \ if (sizeof (*(mem)) == 1) \ - __asm __volatile ("\ + __asm__ __volatile__ ("\ .align 2\n\ mova 1f,r0\n\ mov r15,r1\n\ @@ -188,7 +188,7 @@ typedef uintmax_t uatomic_max_t; : "=&r" (__tmp) : "r" (mem), "0" (__value) \ : "r0", "r1", "r2", "memory"); \ else if (sizeof (*(mem)) == 2) \ - __asm __volatile ("\ + __asm__ __volatile__ ("\ .align 2\n\ mova 1f,r0\n\ mov r15,r1\n\ @@ -200,7 +200,7 @@ typedef uintmax_t uatomic_max_t; : "=&r" (__tmp) : "r" (mem), "0" (__value) \ : "r0", "r1", "r2", "memory"); \ else if (sizeof (*(mem)) == 4) \ - __asm __volatile ("\ + __asm__ __volatile__ ("\ .align 2\n\ mova 1f,r0\n\ mov r15,r1\n\ @@ -227,7 +227,7 @@ typedef uintmax_t uatomic_max_t; ({ unsigned char __result; \ __typeof (*(mem)) __tmp, __value = (value); \ if (sizeof (*(mem)) == 1) \ - __asm __volatile ("\ + __asm__ __volatile__ ("\ .align 2\n\ mova 1f,r0\n\ mov r15,r1\n\ @@ -241,7 +241,7 @@ typedef uintmax_t uatomic_max_t; : "=r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \ : "r0", "r1", "r2", "t", "memory"); \ else if (sizeof (*(mem)) == 2) \ - __asm __volatile ("\ + __asm__ __volatile__ ("\ .align 2\n\ mova 1f,r0\n\ mov r15,r1\n\ @@ -255,7 +255,7 @@ typedef uintmax_t uatomic_max_t; : "=r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \ : "r0", "r1", "r2", "t", "memory"); \ else if (sizeof (*(mem)) == 4) \ - __asm __volatile ("\ + __asm__ __volatile__ ("\ .align 2\n\ mova 1f,r0\n\ mov r15,r1\n\ @@ -276,7 +276,7 @@ typedef uintmax_t uatomic_max_t; ({ unsigned char __result; \ __typeof (*(mem)) __tmp, __value = (value); \ if (sizeof (*(mem)) == 1) \ - __asm 
__volatile ("\ + __asm__ __volatile__ ("\ .align 2\n\ mova 1f,r0\n\ mov r15,r1\n\ @@ -290,7 +290,7 @@ typedef uintmax_t uatomic_max_t; : "=r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \ : "r0", "r1", "r2", "t", "memory"); \ else if (sizeof (*(mem)) == 2) \ - __asm __volatile ("\ + __asm__ __volatile__ ("\ .align 2\n\ mova 1f,r0\n\ mov r15,r1\n\ @@ -304,7 +304,7 @@ typedef uintmax_t uatomic_max_t; : "=r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \ : "r0", "r1", "r2", "t", "memory"); \ else if (sizeof (*(mem)) == 4) \ - __asm __volatile ("\ + __asm__ __volatile__ ("\ .align 2\n\ mova 1f,r0\n\ mov r15,r1\n\ @@ -327,7 +327,7 @@ typedef uintmax_t uatomic_max_t; #define atomic_bit_set(mem, bit) \ (void) ({ unsigned int __mask = 1 << (bit); \ if (sizeof (*(mem)) == 1) \ - __asm __volatile ("\ + __asm__ __volatile__ ("\ .align 2\n\ mova 1f,r0\n\ mov r15,r1\n\ @@ -339,7 +339,7 @@ typedef uintmax_t uatomic_max_t; : : "r" (mem), "r" (__mask) \ : "r0", "r1", "r2", "memory"); \ else if (sizeof (*(mem)) == 2) \ - __asm __volatile ("\ + __asm__ __volatile__ ("\ .align 2\n\ mova 1f,r0\n\ mov r15,r1\n\ @@ -351,7 +351,7 @@ typedef uintmax_t uatomic_max_t; : : "r" (mem), "r" (__mask) \ : "r0", "r1", "r2", "memory"); \ else if (sizeof (*(mem)) == 4) \ - __asm __volatile ("\ + __asm__ __volatile__ ("\ .align 2\n\ mova 1f,r0\n\ mov r15,r1\n\ @@ -370,7 +370,7 @@ typedef uintmax_t uatomic_max_t; ({ unsigned int __mask = 1 << (bit); \ unsigned int __result = __mask; \ if (sizeof (*(mem)) == 1) \ - __asm __volatile ("\ + __asm__ __volatile__ ("\ .align 2\n\ mova 1f,r0\n\ nop\n\ @@ -385,7 +385,7 @@ typedef uintmax_t uatomic_max_t; : "r" (mem), "0" (__result), "1" (__mask) \ : "r0", "r1", "r2", "memory"); \ else if (sizeof (*(mem)) == 2) \ - __asm __volatile ("\ + __asm__ __volatile__ ("\ .align 2\n\ mova 1f,r0\n\ nop\n\ @@ -400,7 +400,7 @@ typedef uintmax_t uatomic_max_t; : "r" (mem), "0" (__result), "1" (__mask) \ : "r0", "r1", "r2", "memory"); \ else if (sizeof (*(mem)) == 4) \ - __asm __volatile ("\ + __asm__ __volatile__ ("\ .align 2\n\ mova 1f,r0\n\ nop\n\ diff --git a/libc/sysdeps/linux/x86_64/bits/atomic.h b/libc/sysdeps/linux/x86_64/bits/atomic.h index 67a512568..04870cbf5 100644 --- a/libc/sysdeps/linux/x86_64/bits/atomic.h +++ b/libc/sysdeps/linux/x86_64/bits/atomic.h @@ -57,28 +57,28 @@ typedef uintmax_t uatomic_max_t; #define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ ({ __typeof (*mem) ret; \ - __asm __volatile (LOCK_PREFIX "cmpxchgb %b2, %1" \ + __asm__ __volatile__ (LOCK_PREFIX "cmpxchgb %b2, %1" \ : "=a" (ret), "=m" (*mem) \ : "q" (newval), "m" (*mem), "0" (oldval)); \ ret; }) #define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ ({ __typeof (*mem) ret; \ - __asm __volatile (LOCK_PREFIX "cmpxchgw %w2, %1" \ + __asm__ __volatile__ (LOCK_PREFIX "cmpxchgw %w2, %1" \ : "=a" (ret), "=m" (*mem) \ : "r" (newval), "m" (*mem), "0" (oldval)); \ ret; }) #define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ ({ __typeof (*mem) ret; \ - __asm __volatile (LOCK_PREFIX "cmpxchgl %2, %1" \ + __asm__ __volatile__ (LOCK_PREFIX "cmpxchgl %2, %1" \ : "=a" (ret), "=m" (*mem) \ : "r" (newval), "m" (*mem), "0" (oldval)); \ ret; }) #define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ ({ __typeof (*mem) ret; \ - __asm __volatile (LOCK_PREFIX "cmpxchgq %q2, %1" \ + __asm__ __volatile__ (LOCK_PREFIX "cmpxchgq %q2, %1" \ : "=a" (ret), "=m" (*mem) \ : "r" ((long) (newval)), "m" (*mem), \ "0" ((long) (oldval))); \ @@ -89,19 +89,19 @@ typedef 
uintmax_t uatomic_max_t; #define atomic_exchange_acq(mem, newvalue) \ ({ __typeof (*mem) result; \ if (sizeof (*mem) == 1) \ - __asm __volatile ("xchgb %b0, %1" \ + __asm__ __volatile__ ("xchgb %b0, %1" \ : "=r" (result), "=m" (*mem) \ : "0" (newvalue), "m" (*mem)); \ else if (sizeof (*mem) == 2) \ - __asm __volatile ("xchgw %w0, %1" \ + __asm__ __volatile__ ("xchgw %w0, %1" \ : "=r" (result), "=m" (*mem) \ : "0" (newvalue), "m" (*mem)); \ else if (sizeof (*mem) == 4) \ - __asm __volatile ("xchgl %0, %1" \ + __asm__ __volatile__ ("xchgl %0, %1" \ : "=r" (result), "=m" (*mem) \ : "0" (newvalue), "m" (*mem)); \ else \ - __asm __volatile ("xchgq %q0, %1" \ + __asm__ __volatile__ ("xchgq %q0, %1" \ : "=r" (result), "=m" (*mem) \ : "0" ((long) (newvalue)), "m" (*mem)); \ result; }) @@ -110,19 +110,19 @@ typedef uintmax_t uatomic_max_t; #define atomic_exchange_and_add(mem, value) \ ({ __typeof (*mem) result; \ if (sizeof (*mem) == 1) \ - __asm __volatile (LOCK_PREFIX "xaddb %b0, %1" \ + __asm__ __volatile__ (LOCK_PREFIX "xaddb %b0, %1" \ : "=r" (result), "=m" (*mem) \ : "0" (value), "m" (*mem)); \ else if (sizeof (*mem) == 2) \ - __asm __volatile (LOCK_PREFIX "xaddw %w0, %1" \ + __asm__ __volatile__ (LOCK_PREFIX "xaddw %w0, %1" \ : "=r" (result), "=m" (*mem) \ : "0" (value), "m" (*mem)); \ else if (sizeof (*mem) == 4) \ - __asm __volatile (LOCK_PREFIX "xaddl %0, %1" \ + __asm__ __volatile__ (LOCK_PREFIX "xaddl %0, %1" \ : "=r" (result), "=m" (*mem) \ : "0" (value), "m" (*mem)); \ else \ - __asm __volatile (LOCK_PREFIX "xaddq %q0, %1" \ + __asm__ __volatile__ (LOCK_PREFIX "xaddq %q0, %1" \ : "=r" (result), "=m" (*mem) \ : "0" ((long) (value)), "m" (*mem)); \ result; }) @@ -134,19 +134,19 @@ typedef uintmax_t uatomic_max_t; else if (__builtin_constant_p (value) && (value) == 1) \ atomic_decrement (mem); \ else if (sizeof (*mem) == 1) \ - __asm __volatile (LOCK_PREFIX "addb %b1, %0" \ + __asm__ __volatile__ (LOCK_PREFIX "addb %b1, %0" \ : "=m" (*mem) \ : "ir" (value), "m" (*mem)); \ else if (sizeof (*mem) == 2) \ - __asm __volatile (LOCK_PREFIX "addw %w1, %0" \ + __asm__ __volatile__ (LOCK_PREFIX "addw %w1, %0" \ : "=m" (*mem) \ : "ir" (value), "m" (*mem)); \ else if (sizeof (*mem) == 4) \ - __asm __volatile (LOCK_PREFIX "addl %1, %0" \ + __asm__ __volatile__ (LOCK_PREFIX "addl %1, %0" \ : "=m" (*mem) \ : "ir" (value), "m" (*mem)); \ else \ - __asm __volatile (LOCK_PREFIX "addq %q1, %0" \ + __asm__ __volatile__ (LOCK_PREFIX "addq %q1, %0" \ : "=m" (*mem) \ : "ir" ((long) (value)), "m" (*mem)); \ }) @@ -155,19 +155,19 @@ typedef uintmax_t uatomic_max_t; #define atomic_add_negative(mem, value) \ ({ unsigned char __result; \ if (sizeof (*mem) == 1) \ - __asm __volatile (LOCK_PREFIX "addb %b2, %0; sets %1" \ + __asm__ __volatile__ (LOCK_PREFIX "addb %b2, %0; sets %1" \ : "=m" (*mem), "=qm" (__result) \ : "ir" (value), "m" (*mem)); \ else if (sizeof (*mem) == 2) \ - __asm __volatile (LOCK_PREFIX "addw %w2, %0; sets %1" \ + __asm__ __volatile__ (LOCK_PREFIX "addw %w2, %0; sets %1" \ : "=m" (*mem), "=qm" (__result) \ : "ir" (value), "m" (*mem)); \ else if (sizeof (*mem) == 4) \ - __asm __volatile (LOCK_PREFIX "addl %2, %0; sets %1" \ + __asm__ __volatile__ (LOCK_PREFIX "addl %2, %0; sets %1" \ : "=m" (*mem), "=qm" (__result) \ : "ir" (value), "m" (*mem)); \ else \ - __asm __volatile (LOCK_PREFIX "addq %q2, %0; sets %1" \ + __asm__ __volatile__ (LOCK_PREFIX "addq %q2, %0; sets %1" \ : "=m" (*mem), "=qm" (__result) \ : "ir" ((long) (value)), "m" (*mem)); \ __result; }) @@ -176,19 +176,19 @@ typedef 
uintmax_t uatomic_max_t; #define atomic_add_zero(mem, value) \ ({ unsigned char __result; \ if (sizeof (*mem) == 1) \ - __asm __volatile (LOCK_PREFIX "addb %b2, %0; setz %1" \ + __asm__ __volatile__ (LOCK_PREFIX "addb %b2, %0; setz %1" \ : "=m" (*mem), "=qm" (__result) \ : "ir" (value), "m" (*mem)); \ else if (sizeof (*mem) == 2) \ - __asm __volatile (LOCK_PREFIX "addw %w2, %0; setz %1" \ + __asm__ __volatile__ (LOCK_PREFIX "addw %w2, %0; setz %1" \ : "=m" (*mem), "=qm" (__result) \ : "ir" (value), "m" (*mem)); \ else if (sizeof (*mem) == 4) \ - __asm __volatile (LOCK_PREFIX "addl %2, %0; setz %1" \ + __asm__ __volatile__ (LOCK_PREFIX "addl %2, %0; setz %1" \ : "=m" (*mem), "=qm" (__result) \ : "ir" (value), "m" (*mem)); \ else \ - __asm __volatile (LOCK_PREFIX "addq %q2, %0; setz %1" \ + __asm__ __volatile__ (LOCK_PREFIX "addq %q2, %0; setz %1" \ : "=m" (*mem), "=qm" (__result) \ : "ir" ((long) (value)), "m" (*mem)); \ __result; }) @@ -196,19 +196,19 @@ typedef uintmax_t uatomic_max_t; #define atomic_increment(mem) \ (void) ({ if (sizeof (*mem) == 1) \ - __asm __volatile (LOCK_PREFIX "incb %b0" \ + __asm__ __volatile__ (LOCK_PREFIX "incb %b0" \ : "=m" (*mem) \ : "m" (*mem)); \ else if (sizeof (*mem) == 2) \ - __asm __volatile (LOCK_PREFIX "incw %w0" \ + __asm__ __volatile__ (LOCK_PREFIX "incw %w0" \ : "=m" (*mem) \ : "m" (*mem)); \ else if (sizeof (*mem) == 4) \ - __asm __volatile (LOCK_PREFIX "incl %0" \ + __asm__ __volatile__ (LOCK_PREFIX "incl %0" \ : "=m" (*mem) \ : "m" (*mem)); \ else \ - __asm __volatile (LOCK_PREFIX "incq %q0" \ + __asm__ __volatile__ (LOCK_PREFIX "incq %q0" \ : "=m" (*mem) \ : "m" (*mem)); \ }) @@ -217,19 +217,19 @@ typedef uintmax_t uatomic_max_t; #define atomic_increment_and_test(mem) \ ({ unsigned char __result; \ if (sizeof (*mem) == 1) \ - __asm __volatile (LOCK_PREFIX "incb %b0; sete %1" \ + __asm__ __volatile__ (LOCK_PREFIX "incb %b0; sete %1" \ : "=m" (*mem), "=qm" (__result) \ : "m" (*mem)); \ else if (sizeof (*mem) == 2) \ - __asm __volatile (LOCK_PREFIX "incw %w0; sete %1" \ + __asm__ __volatile__ (LOCK_PREFIX "incw %w0; sete %1" \ : "=m" (*mem), "=qm" (__result) \ : "m" (*mem)); \ else if (sizeof (*mem) == 4) \ - __asm __volatile (LOCK_PREFIX "incl %0; sete %1" \ + __asm__ __volatile__ (LOCK_PREFIX "incl %0; sete %1" \ : "=m" (*mem), "=qm" (__result) \ : "m" (*mem)); \ else \ - __asm __volatile (LOCK_PREFIX "incq %q0; sete %1" \ + __asm__ __volatile__ (LOCK_PREFIX "incq %q0; sete %1" \ : "=m" (*mem), "=qm" (__result) \ : "m" (*mem)); \ __result; }) @@ -237,19 +237,19 @@ typedef uintmax_t uatomic_max_t; #define atomic_decrement(mem) \ (void) ({ if (sizeof (*mem) == 1) \ - __asm __volatile (LOCK_PREFIX "decb %b0" \ + __asm__ __volatile__ (LOCK_PREFIX "decb %b0" \ : "=m" (*mem) \ : "m" (*mem)); \ else if (sizeof (*mem) == 2) \ - __asm __volatile (LOCK_PREFIX "decw %w0" \ + __asm__ __volatile__ (LOCK_PREFIX "decw %w0" \ : "=m" (*mem) \ : "m" (*mem)); \ else if (sizeof (*mem) == 4) \ - __asm __volatile (LOCK_PREFIX "decl %0" \ + __asm__ __volatile__ (LOCK_PREFIX "decl %0" \ : "=m" (*mem) \ : "m" (*mem)); \ else \ - __asm __volatile (LOCK_PREFIX "decq %q0" \ + __asm__ __volatile__ (LOCK_PREFIX "decq %q0" \ : "=m" (*mem) \ : "m" (*mem)); \ }) @@ -258,19 +258,19 @@ typedef uintmax_t uatomic_max_t; #define atomic_decrement_and_test(mem) \ ({ unsigned char __result; \ if (sizeof (*mem) == 1) \ - __asm __volatile (LOCK_PREFIX "decb %b0; sete %1" \ + __asm__ __volatile__ (LOCK_PREFIX "decb %b0; sete %1" \ : "=m" (*mem), "=qm" (__result) \ : "m" (*mem)); \ else if 
(sizeof (*mem) == 2) \ - __asm __volatile (LOCK_PREFIX "decw %w0; sete %1" \ + __asm__ __volatile__ (LOCK_PREFIX "decw %w0; sete %1" \ : "=m" (*mem), "=qm" (__result) \ : "m" (*mem)); \ else if (sizeof (*mem) == 4) \ - __asm __volatile (LOCK_PREFIX "decl %0; sete %1" \ + __asm__ __volatile__ (LOCK_PREFIX "decl %0; sete %1" \ : "=m" (*mem), "=qm" (__result) \ : "m" (*mem)); \ else \ - __asm __volatile (LOCK_PREFIX "decq %q0; sete %1" \ + __asm__ __volatile__ (LOCK_PREFIX "decq %q0; sete %1" \ : "=m" (*mem), "=qm" (__result) \ : "m" (*mem)); \ __result; }) @@ -278,23 +278,23 @@ typedef uintmax_t uatomic_max_t; #define atomic_bit_set(mem, bit) \ (void) ({ if (sizeof (*mem) == 1) \ - __asm __volatile (LOCK_PREFIX "orb %b2, %0" \ + __asm__ __volatile__ (LOCK_PREFIX "orb %b2, %0" \ : "=m" (*mem) \ : "m" (*mem), "ir" (1L << (bit))); \ else if (sizeof (*mem) == 2) \ - __asm __volatile (LOCK_PREFIX "orw %w2, %0" \ + __asm__ __volatile__ (LOCK_PREFIX "orw %w2, %0" \ : "=m" (*mem) \ : "m" (*mem), "ir" (1L << (bit))); \ else if (sizeof (*mem) == 4) \ - __asm __volatile (LOCK_PREFIX "orl %2, %0" \ + __asm__ __volatile__ (LOCK_PREFIX "orl %2, %0" \ : "=m" (*mem) \ : "m" (*mem), "ir" (1L << (bit))); \ else if (__builtin_constant_p (bit) && (bit) < 32) \ - __asm __volatile (LOCK_PREFIX "orq %2, %0" \ + __asm__ __volatile__ (LOCK_PREFIX "orq %2, %0" \ : "=m" (*mem) \ : "m" (*mem), "i" (1L << (bit))); \ else \ - __asm __volatile (LOCK_PREFIX "orq %q2, %0" \ + __asm__ __volatile__ (LOCK_PREFIX "orq %q2, %0" \ : "=m" (*mem) \ : "m" (*mem), "r" (1UL << (bit))); \ }) @@ -303,19 +303,19 @@ typedef uintmax_t uatomic_max_t; #define atomic_bit_test_set(mem, bit) \ ({ unsigned char __result; \ if (sizeof (*mem) == 1) \ - __asm __volatile (LOCK_PREFIX "btsb %3, %1; setc %0" \ + __asm__ __volatile__ (LOCK_PREFIX "btsb %3, %1; setc %0" \ : "=q" (__result), "=m" (*mem) \ : "m" (*mem), "ir" (bit)); \ else if (sizeof (*mem) == 2) \ - __asm __volatile (LOCK_PREFIX "btsw %3, %1; setc %0" \ + __asm__ __volatile__ (LOCK_PREFIX "btsw %3, %1; setc %0" \ : "=q" (__result), "=m" (*mem) \ : "m" (*mem), "ir" (bit)); \ else if (sizeof (*mem) == 4) \ - __asm __volatile (LOCK_PREFIX "btsl %3, %1; setc %0" \ + __asm__ __volatile__ (LOCK_PREFIX "btsl %3, %1; setc %0" \ : "=q" (__result), "=m" (*mem) \ : "m" (*mem), "ir" (bit)); \ else \ - __asm __volatile (LOCK_PREFIX "btsq %3, %1; setc %0" \ + __asm__ __volatile__ (LOCK_PREFIX "btsq %3, %1; setc %0" \ : "=q" (__result), "=m" (*mem) \ : "m" (*mem), "ir" (bit)); \ __result; }) diff --git a/libpthread/linuxthreads.old/spinlock.c b/libpthread/linuxthreads.old/spinlock.c index f5999e4d2..994596d05 100644 --- a/libpthread/linuxthreads.old/spinlock.c +++ b/libpthread/linuxthreads.old/spinlock.c @@ -34,7 +34,7 @@ static inline void __pthread_release(int * spinlock) { WRITE_MEMORY_BARRIER(); *spinlock = __LT_SPINLOCK_INIT; - __asm __volatile__ ("" : "=m" (*spinlock) : "m" (*spinlock)); + __asm__ __volatile__ ("" : "=m" (*spinlock) : "m" (*spinlock)); } @@ -110,7 +110,7 @@ void internal_function __pthread_lock(struct _pthread_fastlock * lock, #ifdef BUSY_WAIT_NOP BUSY_WAIT_NOP; #endif - __asm __volatile ("" : "=m" (lock->__status) : "m" (lock->__status)); + __asm__ __volatile__ ("" : "=m" (lock->__status) : "m" (lock->__status)); } lock->__spinlock += (spin_count - lock->__spinlock) / 8; |
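The two spinlock.c hunks respell an empty asm statement that acts purely as a compiler-level barrier: it emits no instructions, but its memory operands keep GCC from caching the lock word in a register or moving accesses across the release point. A minimal sketch of that idiom is below; the names are placeholders, and the real `__pthread_release` also issues `WRITE_MEMORY_BARRIER()` before the store, which this sketch leaves out:

```c
/* Minimal sketch of the empty-asm compiler barrier from the spinlock.c
 * hunks, respelled with __asm__ __volatile__.  Names are placeholders;
 * the hardware write barrier used by linuxthreads is omitted here. */
#define MY_SPINLOCK_INIT 0

static inline void my_release(int *spinlock)
{
    *spinlock = MY_SPINLOCK_INIT;
    /* No instructions are emitted, but the "m" operands force the store
     * above to complete before the asm and forbid reordering later
     * accesses to *spinlock back across this point. */
    __asm__ __volatile__ ("" : "=m" (*spinlock) : "m" (*spinlock));
}

int main(void)
{
    int lock = 1;        /* pretend the lock is currently held */
    my_release(&lock);
    return lock;         /* 0: released */
}
```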