author | Bernhard Reutner-Fischer <rep.dot.nop@gmail.com> | 2008-04-24 09:24:59 +0000
---|---|---
committer | Bernhard Reutner-Fischer <rep.dot.nop@gmail.com> | 2008-04-24 09:24:59 +0000
commit | 1d8abd74c4ae9b28035b549345f9f736cdb98c10 (patch) |
tree | 9a534ad59f2ddfc18076a92e3331128d4c5bd2da | /libc/sysdeps/linux
parent | 1db4be5334a327dde925c73b8d924440257cf487 (diff) |
- fixup asm. No object-code changes
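The change is a pure spelling fix: every `asm`/`volatile` used in inline-assembly statements and register bindings becomes `__asm__`/`__volatile__`, leaving the generated object code identical. The usual reason for this kind of cleanup (not spelled out in the commit message) is that the plain `asm` keyword is a GNU extension that is not recognized under strict ISO modes such as `gcc -std=c99`, while the double-underscore spellings are always accepted, so code that may be compiled in strict mode has to use them. A minimal sketch with a hypothetical barrier macro, not code from this commit:

```c
/* Hypothetical example, not from this commit: both macros expand to the same
 * compiler barrier, but only the second spelling is usable when the including
 * translation unit is built with a strict ISO mode such as -std=c99, where
 * the plain `asm' keyword is not available. */
#include <stdio.h>

#define barrier_gnu() asm volatile ("" ::: "memory")           /* GNU-only spelling */
#define barrier_iso() __asm__ __volatile__ ("" ::: "memory")   /* always-valid spelling */

int main(void)
{
    int x = 1;
    barrier_iso();              /* compiler may not reorder loads/stores across this */
    printf("x = %d\n", x);
    return 0;
}
```

A use of `barrier_gnu()` fails to compile with `-std=c99`, whereas `barrier_iso()` builds in any mode, which is why the headers below standardize on the `__asm__ __volatile__` form.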
Diffstat (limited to 'libc/sysdeps/linux')
41 files changed, 184 insertions, 184 deletions
diff --git a/libc/sysdeps/linux/alpha/bits/atomic.h b/libc/sysdeps/linux/alpha/bits/atomic.h index 36a740c75..bbfd201b5 100644 --- a/libc/sysdeps/linux/alpha/bits/atomic.h +++ b/libc/sysdeps/linux/alpha/bits/atomic.h @@ -363,7 +363,7 @@ typedef uintmax_t uatomic_max_t; */ #ifndef UP -# define atomic_full_barrier() __asm ("mb" : : : "memory"); -# define atomic_read_barrier() __asm ("mb" : : : "memory"); -# define atomic_write_barrier() __asm ("wmb" : : : "memory"); +# define atomic_full_barrier() __asm__ ("mb" : : : "memory"); +# define atomic_read_barrier() __asm__ ("mb" : : : "memory"); +# define atomic_write_barrier() __asm__ ("wmb" : : : "memory"); #endif diff --git a/libc/sysdeps/linux/alpha/bits/mathinline.h b/libc/sysdeps/linux/alpha/bits/mathinline.h index 87d40058c..3dd38e89f 100644 --- a/libc/sysdeps/linux/alpha/bits/mathinline.h +++ b/libc/sysdeps/linux/alpha/bits/mathinline.h @@ -38,7 +38,7 @@ # define isunordered(u, v) \ (__extension__ \ ({ double __r, __u = (u), __v = (v); \ - __asm ("cmptun/su %1,%2,%0\n\ttrapb" \ + __asm__ ("cmptun/su %1,%2,%0\n\ttrapb" \ : "=&f" (__r) : "f" (__u), "f"(__v)); \ __r != 0; })) #endif /* ISO C99 */ @@ -52,7 +52,7 @@ __MATH_INLINE TYPE \ __NTH (NAME (TYPE __x, TYPE __y)) \ { \ TYPE __z; \ - __asm ("cpys %1, %2, %0" : "=f" (__z) : "f" (__y), "f" (__x)); \ + __asm__ ("cpys %1, %2, %0" : "=f" (__z) : "f" (__y), "f" (__x)); \ return __z; \ } @@ -71,7 +71,7 @@ __MATH_INLINE TYPE \ __NTH (NAME (TYPE __x)) \ { \ TYPE __z; \ - __asm ("cpys $f31, %1, %0" : "=f" (__z) : "f" (__x)); \ + __asm__ ("cpys $f31, %1, %0" : "=f" (__z) : "f" (__x)); \ return __z; \ } @@ -101,7 +101,7 @@ __NTH (__floorf (float __x)) float __tmp1, __tmp2; - __asm ("cvtst/s %3,%2\n\t" + __asm__ ("cvtst/s %3,%2\n\t" #ifdef _IEEE_FP_INEXACT "cvttq/svim %2,%1\n\t" #else @@ -120,7 +120,7 @@ __NTH (__floor (double __x)) if (__x != 0 && fabs (__x) < 9007199254740992.0) /* 1 << DBL_MANT_DIG */ { double __tmp1; - __asm ( + __asm__ ( #ifdef _IEEE_FP_INEXACT "cvttq/svim %2,%1\n\t" #else diff --git a/libc/sysdeps/linux/cris/__init_brk.c b/libc/sysdeps/linux/cris/__init_brk.c index c8c6a37e7..27b8524b2 100644 --- a/libc/sysdeps/linux/cris/__init_brk.c +++ b/libc/sysdeps/linux/cris/__init_brk.c @@ -15,7 +15,7 @@ __init_brk (void) /* Notice that we don't need to save/restore the GOT * register since that is not call clobbered by the syscall. */ - asm ("clear.d $r10\n\t" + __asm__ ("clear.d $r10\n\t" "movu.w " STR(__NR_brk) ",$r9\n\t" "break 13\n\t" "move.d $r10, %0" diff --git a/libc/sysdeps/linux/cris/bits/syscalls.h b/libc/sysdeps/linux/cris/bits/syscalls.h index 065f48742..d44e79ca2 100644 --- a/libc/sysdeps/linux/cris/bits/syscalls.h +++ b/libc/sysdeps/linux/cris/bits/syscalls.h @@ -66,11 +66,11 @@ return (type) (INLINE_SYSCALL(name, 6, arg1, arg2, arg3, arg4, arg5, arg6)); \ #define INLINE_SYSCALL(name, nr, args...) 
\ ({ \ unsigned long __sys_res; \ - register unsigned long __res asm ("r10"); \ + register unsigned long __res __asm__ ("r10"); \ LOAD_ARGS_c_##nr (args) \ - register unsigned long __callno asm ("r9") \ + register unsigned long __callno __asm__ ("r9") \ = SYS_ify (name); \ - asm volatile (LOAD_ARGS_asm_##nr (args) \ + __asm__ __volatile__ (LOAD_ARGS_asm_##nr (args) \ CHECK_ARGS_asm_##nr \ "break 13" \ : "=r" (__res) \ diff --git a/libc/sysdeps/linux/cris/brk.c b/libc/sysdeps/linux/cris/brk.c index 938b15714..ae99e109c 100644 --- a/libc/sysdeps/linux/cris/brk.c +++ b/libc/sysdeps/linux/cris/brk.c @@ -20,7 +20,7 @@ int brk(void * end_data_seg) * Notice that we don't need to save/restore the GOT * register since that is not call clobbered by the syscall. */ - asm ("move.d %1,$r10\n\t" + __asm__ ("move.d %1,$r10\n\t" "movu.w " STR(__NR_brk) ",$r9\n\t" "break 13\n\t" "move.d $r10, %0" diff --git a/libc/sysdeps/linux/cris/sbrk.c b/libc/sysdeps/linux/cris/sbrk.c index f3fab95bb..830d01dd6 100644 --- a/libc/sysdeps/linux/cris/sbrk.c +++ b/libc/sysdeps/linux/cris/sbrk.c @@ -24,7 +24,7 @@ sbrk(intptr_t increment) * Notice that we don't need to save/restore the GOT * register since that is not call clobbered by the syscall. */ - asm ("move.d %1,$r10\n\t" + __asm__ ("move.d %1,$r10\n\t" "movu.w " STR(__NR_brk) ",$r9\n\t" "break 13\n\t" "move.d $r10, %0" diff --git a/libc/sysdeps/linux/e1/bits/fenvinline.h b/libc/sysdeps/linux/e1/bits/fenvinline.h index 27ee172a5..cce266131 100644 --- a/libc/sysdeps/linux/e1/bits/fenvinline.h +++ b/libc/sysdeps/linux/e1/bits/fenvinline.h @@ -57,7 +57,7 @@ #define fegetround() \ ({ \ unsigned int tmp; \ - asm volatile("mov %0, SR" \ + __asm__ __volatile__("mov %0, SR" \ :"=l"(tmp) \ :/*no input*/); \ tmp &= (3<<13); \ @@ -70,7 +70,7 @@ unsigned int tmp = (3 << 13); \ while(1) { \ /* Clear SR.FRM field */ \ - asm volatile("andn SR, %0" \ + __asm__ __volatile__("andn SR, %0" \ :/*no output*/ \ :"l"(tmp) ); \ tmp &= round; \ @@ -80,7 +80,7 @@ break; \ } \ \ - asm volatile("or SR, %0" \ + __asm__ __volatile__("or SR, %0" \ :/*no input*/ \ :"l"(round) ); \ tmp = 0; \ @@ -100,7 +100,7 @@ static inline feclearexcept(int __excepts) if( __excepts & (~0x1F00) ) return -1; - asm volatile("mov %0, SR" + __asm__ __volatile__("mov %0, SR" :"=l"(enabled_excepts) :/*no input*/ ); @@ -112,7 +112,7 @@ static inline feclearexcept(int __excepts) disabled_excepts &= __excepts; /* Clear accrued exceptions */ - asm volatile("andn G2, %0\n\t" + __asm__ __volatile__("andn G2, %0\n\t" "andn G2, %1\n\t" :/*no output*/ :"l"(enabled_excepts), @@ -133,7 +133,7 @@ inline int fetestexcept(int __excepts) if( __excepts & (~0x1F00) ) return -1; - asm volatile("mov %0, SR" + __asm__ __volatile__("mov %0, SR" :"=l"(enabled_excepts) :/*no input*/ ); @@ -141,7 +141,7 @@ inline int fetestexcept(int __excepts) disabled_excepts = ~enabled_excepts; disabled_excepts &= 0x1F00; - asm volatile("mov %0, G2" + __asm__ __volatile__("mov %0, G2" :"=l"(G2) :/*no input*/ ); @@ -154,7 +154,7 @@ inline int fetestexcept(int __excepts) static inline int feraiseexcept(int __excepts) { - asm volatile("or G2, %0" + __asm__ __volatile__("or G2, %0" :/*no output*/ :"l"( __excepts >> 8 ) ); return 0; @@ -169,7 +169,7 @@ static inline int feraiseexcept(int __excepts) int __tmpexcepts = __excepts; \ \ while(1) { \ - asm volatile("mov %0, SR" \ + __asm__ __volatile__("mov %0, SR" \ :"=l"(__pexcepts) \ :/*no input*/ ); \ __pexcepts &= 0x1F00; \ @@ -181,7 +181,7 @@ static inline int feraiseexcept(int __excepts) break; \ } \ \ - asm 
volatile("or SR, %0" \ + __asm__ __volatile__("or SR, %0" \ :/*no output*/ \ :"l"(__tmpexcepts) ); \ __retval = __pexcepts; \ @@ -197,7 +197,7 @@ static inline int feraiseexcept(int __excepts) int __tmpexcepts = __excepts; \ \ while(1) { \ - asm volatile("mov %0, SR" \ + __asm__ __volatile__("mov %0, SR" \ :"=l"(__pexcepts) \ :/*no input*/ ); \ __pexcepts &= 0x1F00; \ @@ -209,7 +209,7 @@ static inline int feraiseexcept(int __excepts) break; \ } \ \ - asm volatile("andn SR, %0" \ + __asm__ __volatile__("andn SR, %0" \ :/*no output*/ \ :"l"(__tmpexcepts) ); \ __retval = __pexcepts; \ @@ -221,7 +221,7 @@ static inline int feraiseexcept(int __excepts) static inline int fegetexcept(int excepts) { unsigned int tmp; - asm volatile("mov %0, SR" + __asm__ __volatile__("mov %0, SR" :"=l"(tmp) :/*no input*/ ); tmp &= 0x1F00; @@ -230,7 +230,7 @@ static inline int fegetexcept(int excepts) static inline int fegetenv(fenv_t *envp) { - asm volatile("mov %0, SR\n\t + __asm__ __volatile__("mov %0, SR\n\t mov %1, SR\n\t mov %2, G2\n\t mov %3, G2\n\t" @@ -258,14 +258,14 @@ static inline int fegetenv(fenv_t *envp) ({ \ /* Clear FRM & FTE field of SR */ \ unsigned long clearSR = ( 127<<8 ); \ - asm volatile("andn SR, %0\n\t" \ + __asm__ __volatile__("andn SR, %0\n\t" \ "or SR, %1\n\t" \ "or SR, %2\n\t" \ :/*no output*/ \ :"l"(clearSR), \ "l"(envp->round_mode), \ "l"(envp->trap_enabled) ); \ - asm volatile("andn G2, 0x1F1F\n\t" \ + __asm__ __volatile__("andn G2, 0x1F1F\n\t" \ "or G2, %0\n\t" \ "or G2, %1\n\t" \ :/*no output*/ \ @@ -277,14 +277,14 @@ static inline int fegetenv(fenv_t *envp) #define feupdateenv(envp) \ ({ \ /* Clear FRM & FTE field of SR */ \ - asm volatile(/* We dont clear the prev SR*/ \ + __asm__ __volatile__(/* We dont clear the prev SR*/ \ "or SR, %1\n\t" \ "or SR, %2\n\t" \ :/*no output*/ \ :"l"(clearSR), \ "l"(envp->round_mode), \ "l"(envp->accrued_except) ); \ - asm volatile(/* We dont clear the prev SR*/ \ + __asm__ __volatile__(/* We dont clear the prev SR*/ \ "or G2, %0\n\t" \ "or G2, %1\n\t" \ :/*no output*/ \ diff --git a/libc/sysdeps/linux/e1/longjmp.c b/libc/sysdeps/linux/e1/longjmp.c index e628bd7e0..fbd5103a4 100644 --- a/libc/sysdeps/linux/e1/longjmp.c +++ b/libc/sysdeps/linux/e1/longjmp.c @@ -27,7 +27,7 @@ void longjmp(jmp_buf state, int value ) e1newSP(state->__jmpbuf->SavedSP); #define _state_ ((struct __jmp_buf_tag*)jmpbuf_ptr) - asm volatile("mov L0, %0\n\t" + __asm__ __volatile__("mov L0, %0\n\t" "mov L1, %1\n\t" "mov L2, %2\n\t" "mov G3, %3\n\t" @@ -60,7 +60,7 @@ void siglongjmp(sigjmp_buf state, int value ) #define _state_ ((struct __jmp_buf_tag*)jmpbuf_ptr) - asm volatile("mov L0, %0\n\t" + __asm__ __volatile__("mov L0, %0\n\t" "mov L1, %1\n\t" "mov L2, %2\n\t" "mov G3, %3\n\t" diff --git a/libc/sysdeps/linux/e1/setjmp.c b/libc/sysdeps/linux/e1/setjmp.c index 750c4e08e..3a3f3b7e8 100644 --- a/libc/sysdeps/linux/e1/setjmp.c +++ b/libc/sysdeps/linux/e1/setjmp.c @@ -11,14 +11,14 @@ libc_hidden_proto(sigprocmask) int setjmp( jmp_buf state) { - asm volatile( "mov %0, G3\n\t" + __asm__ __volatile__( "mov %0, G3\n\t" "mov %1, G4\n\t" :"=l"(state->__jmpbuf->G3), "=l"(state->__jmpbuf->G4) :/*no input*/ :"%G3", "%G4" ); - asm volatile( "setadr %0\n\t" + __asm__ __volatile__( "setadr %0\n\t" "mov %1, L1\n\t" "mov %2, L2\n\t" :"=l"(state->__jmpbuf->SavedSP), @@ -38,14 +38,14 @@ int sigsetjmp( sigjmp_buf state , int savesigs) } else state->__mask_was_saved = 0; - asm volatile( "mov %0, G3\n\t" + __asm__ __volatile__( "mov %0, G3\n\t" "mov %1, G4\n\t" :"=l"(state->__jmpbuf->G3), 
"=l"(state->__jmpbuf->G4) :/*no input*/ :"%G3", "%G4" ); - asm volatile( "setadr %0\n\t" + __asm__ __volatile__( "setadr %0\n\t" "mov %1, L2\n\t" "mov %2, L3\n\t" :"=l"(state->__jmpbuf->SavedSP), diff --git a/libc/sysdeps/linux/h8300/brk.c b/libc/sysdeps/linux/h8300/brk.c index 668e6fe70..9eab66060 100644 --- a/libc/sysdeps/linux/h8300/brk.c +++ b/libc/sysdeps/linux/h8300/brk.c @@ -17,7 +17,7 @@ int brk (void *addr) { void *newbrk; - asm ("mov.l %2,er1\n\t" + __asm__ ("mov.l %2,er1\n\t" "mov.l %1,er0\n\t" "trapa #0\n\t" "mov.l er0,%0" diff --git a/libc/sysdeps/linux/h8300/ptrace.c b/libc/sysdeps/linux/h8300/ptrace.c index 7ce1b277e..1dd7d063d 100644 --- a/libc/sysdeps/linux/h8300/ptrace.c +++ b/libc/sysdeps/linux/h8300/ptrace.c @@ -11,7 +11,7 @@ ptrace(int request, int pid, int addr, int data) if (request > 0 && request < 4) data = (int)&ret; - __asm__ volatile ("sub.l er0,er0\n\t" + __asm__ __volatile__ ("sub.l er0,er0\n\t" "mov.b %1,r0l\n\t" "mov.l %2,er1\n\t" "mov.l %3,er2\n\t" diff --git a/libc/sysdeps/linux/hppa/bits/syscalls.h b/libc/sysdeps/linux/hppa/bits/syscalls.h index 99effb260..9035cd5aa 100644 --- a/libc/sysdeps/linux/hppa/bits/syscalls.h +++ b/libc/sysdeps/linux/hppa/bits/syscalls.h @@ -48,10 +48,10 @@ #define K_INLINE_SYSCALL(name, nr, args...) ({ \ long __sys_res; \ { \ - register unsigned long __res asm("r28"); \ + register unsigned long __res __asm__("r28"); \ K_LOAD_ARGS_##nr(args) \ /* FIXME: HACK stw/ldw r19 around syscall */ \ - asm volatile( \ + __asm__ __volatile__( \ K_STW_ASM_PIC \ " ble 0x100(%%sr2, %%r0)\n" \ " ldi %1, %%r20\n" \ diff --git a/libc/sysdeps/linux/hppa/syscall.c b/libc/sysdeps/linux/hppa/syscall.c index 9abdc5372..04e80b2b2 100644 --- a/libc/sysdeps/linux/hppa/syscall.c +++ b/libc/sysdeps/linux/hppa/syscall.c @@ -45,9 +45,9 @@ syscall (long int __sysno, ...) va_end (args); { - register unsigned long int __res asm("r28"); + register unsigned long int __res __asm__("r28"); K_LOAD_ARGS_6 (arg0, arg1, arg2, arg3, arg4, arg5) - asm volatile (K_STW_ASM_PIC + __asm__ __volatile__ (K_STW_ASM_PIC " ble 0x100(%%sr2, %%r0) \n" " copy %1, %%r20 \n" K_LDW_ASM_PIC diff --git a/libc/sysdeps/linux/i386/bits/atomic.h b/libc/sysdeps/linux/i386/bits/atomic.h index e0c5ae79c..33279af5f 100644 --- a/libc/sysdeps/linux/i386/bits/atomic.h +++ b/libc/sysdeps/linux/i386/bits/atomic.h @@ -366,4 +366,4 @@ typedef uintmax_t uatomic_max_t; __result; }) -#define atomic_delay() asm ("rep; nop") +#define atomic_delay() __asm__ ("rep; nop") diff --git a/libc/sysdeps/linux/i386/bits/syscalls.h b/libc/sysdeps/linux/i386/bits/syscalls.h index 15cd9f3e4..014539c2a 100644 --- a/libc/sysdeps/linux/i386/bits/syscalls.h +++ b/libc/sysdeps/linux/i386/bits/syscalls.h @@ -165,7 +165,7 @@ return (type) (INLINE_SYSCALL(name, 6, arg1, arg2, arg3, arg4, arg5, arg6)); \ #define INTERNAL_SYSCALL(name, err, nr, args...) 
\ ({ \ register unsigned int resultvar; \ - asm volatile ( \ + __asm__ __volatile__ ( \ LOADARGS_##nr \ "movl %1, %%eax\n\t" \ "int $0x80\n\t" \ diff --git a/libc/sysdeps/linux/ia64/__syscall_error.c b/libc/sysdeps/linux/ia64/__syscall_error.c index bb4ba4755..0727b2b53 100644 --- a/libc/sysdeps/linux/ia64/__syscall_error.c +++ b/libc/sysdeps/linux/ia64/__syscall_error.c @@ -13,7 +13,7 @@ int __syscall_error(void) attribute_hidden; int __syscall_error(void) { - register int err_no asm("%r8"); + register int err_no __asm__("%r8"); __set_errno(err_no); return -1; } diff --git a/libc/sysdeps/linux/ia64/bits/syscalls.h b/libc/sysdeps/linux/ia64/bits/syscalls.h index b9c45a741..4e8a305ea 100644 --- a/libc/sysdeps/linux/ia64/bits/syscalls.h +++ b/libc/sysdeps/linux/ia64/bits/syscalls.h @@ -40,9 +40,9 @@ #define _DO_SYSCALL(name, nr, args...) \ LOAD_ARGS_##nr (args) \ - register long _r8 asm ("r8"); \ - register long _r10 asm ("r10"); \ - register long _r15 asm ("r15") = SYS_ify(name); \ + register long _r8 __asm__ ("r8"); \ + register long _r10 __asm__ ("r10"); \ + register long _r15 __asm__ ("r15") = SYS_ify(name); \ long _retval; \ LOAD_REGS_##nr \ __asm __volatile ("break " ___IA64_BREAK_SYSCALL ";;\n\t" \ @@ -61,37 +61,37 @@ long _arg1 = (long) (a1); \ LOAD_ARGS_0 () #define LOAD_REGS_1 \ - register long _out0 asm ("out0") = _arg1; \ + register long _out0 __asm__ ("out0") = _arg1; \ LOAD_REGS_0 #define LOAD_ARGS_2(a1, a2) \ long _arg2 = (long) (a2); \ LOAD_ARGS_1 (a1) #define LOAD_REGS_2 \ - register long _out1 asm ("out1") = _arg2; \ + register long _out1 __asm__ ("out1") = _arg2; \ LOAD_REGS_1 #define LOAD_ARGS_3(a1, a2, a3) \ long _arg3 = (long) (a3); \ LOAD_ARGS_2 (a1, a2) #define LOAD_REGS_3 \ - register long _out2 asm ("out2") = _arg3; \ + register long _out2 __asm__ ("out2") = _arg3; \ LOAD_REGS_2 #define LOAD_ARGS_4(a1, a2, a3, a4) \ long _arg4 = (long) (a4); \ LOAD_ARGS_3 (a1, a2, a3) #define LOAD_REGS_4 \ - register long _out3 asm ("out3") = _arg4; \ + register long _out3 __asm__ ("out3") = _arg4; \ LOAD_REGS_3 #define LOAD_ARGS_5(a1, a2, a3, a4, a5) \ long _arg5 = (long) (a5); \ LOAD_ARGS_4 (a1, a2, a3, a4) #define LOAD_REGS_5 \ - register long _out4 asm ("out4") = _arg5; \ + register long _out4 __asm__ ("out4") = _arg5; \ LOAD_REGS_4 #define LOAD_ARGS_6(a1, a2, a3, a4, a5, a6) \ long _arg6 = (long) (a6); \ LOAD_ARGS_5 (a1, a2, a3, a4, a5) #define LOAD_REGS_6 \ - register long _out5 asm ("out5") = _arg6; \ + register long _out5 __asm__ ("out5") = _arg6; \ LOAD_REGS_5 #define ASM_OUTARGS_0 diff --git a/libc/sysdeps/linux/m68k/__syscall_error.c b/libc/sysdeps/linux/m68k/__syscall_error.c index 5cfdead8b..a29f6ffd6 100644 --- a/libc/sysdeps/linux/m68k/__syscall_error.c +++ b/libc/sysdeps/linux/m68k/__syscall_error.c @@ -13,7 +13,7 @@ int __syscall_error(void) attribute_hidden; int __syscall_error(void) { - register int err_no asm("%d0"); + register int err_no __asm__("%d0"); __set_errno(-err_no); return -1; } diff --git a/libc/sysdeps/linux/m68k/bits/mathinline.h b/libc/sysdeps/linux/m68k/bits/mathinline.h index acbac47aa..8cc21694b 100644 --- a/libc/sysdeps/linux/m68k/bits/mathinline.h +++ b/libc/sysdeps/linux/m68k/bits/mathinline.h @@ -121,7 +121,7 @@ __m81_defun (float_type, func, (float_type __mathop_x)) \ { \ float_type __result; \ - __asm("f" __STRING(op) "%.x %1, %0" : "=f" (__result) : "f" (__mathop_x));\ + __asm__("f" __STRING(op) "%.x %1, %0" : "=f" (__result) : "f" (__mathop_x));\ return __result; \ } @@ -222,7 +222,7 @@ __m81_defun (int, __CONCAT(__isinf,s), (float_type 
__value)) \ /* There is no branch-condition for infinity, \ so we must extract and examine the condition codes manually. */ \ unsigned long int __fpsr; \ - __asm("ftst%.x %1\n" \ + __asm__("ftst%.x %1\n" \ "fmove%.l %/fpsr, %0" : "=dm" (__fpsr) : "f" (__value)); \ return (__fpsr & (2 << 24)) ? (__fpsr & (8 << 24) ? -1 : 1) : 0; \ } \ @@ -232,7 +232,7 @@ __m81_defun (int, __CONCAT(__finite,s), (float_type __value)) \ /* There is no branch-condition for infinity, so we must extract and \ examine the condition codes manually. */ \ unsigned long int __fpsr; \ - __asm ("ftst%.x %1\n" \ + __asm__ ("ftst%.x %1\n" \ "fmove%.l %/fpsr, %0" : "=dm" (__fpsr) : "f" (__value)); \ return (__fpsr & (3 << 24)) == 0; \ } \ @@ -241,7 +241,7 @@ __m81_defun (float_type, __CONCAT(__scalbn,s), \ (float_type __x, int __n)) \ { \ float_type __result; \ - __asm ("fscale%.l %1, %0" : "=f" (__result) : "dmi" (__n), "0" (__x)); \ + __asm__ ("fscale%.l %1, %0" : "=f" (__result) : "dmi" (__n), "0" (__x)); \ return __result; \ } @@ -258,7 +258,7 @@ __inline_functions(long double,l) __m81_defun (int, __CONCAT(__isnan,s), (float_type __value)) \ { \ char __result; \ - __asm("ftst%.x %1\n" \ + __asm__("ftst%.x %1\n" \ "fsun %0" : "=dm" (__result) : "f" (__value)); \ return __result; \ } @@ -280,7 +280,7 @@ __m81_defun (int, __CONCAT(__signbit,s), (float_type __value)) \ /* There is no branch-condition for the sign bit, so we must extract \ and examine the condition codes manually. */ \ unsigned long int __fpsr; \ - __asm ("ftst%.x %1\n" \ + __asm__ ("ftst%.x %1\n" \ "fmove%.l %/fpsr, %0" : "=dm" (__fpsr) : "f" (__value)); \ return (__fpsr >> 27) & 1; \ } \ @@ -308,7 +308,7 @@ __m81_defun (float_type, __CONCAT(__nearbyint,s), (float_type __x)) \ __m81_defun (long int, __CONCAT(__lrint,s), (float_type __x)) \ { \ long int __result; \ - __asm ("fmove%.l %1, %0" : "=dm" (__result) : "f" (__x)); \ + __asm__ ("fmove%.l %1, %0" : "=dm" (__result) : "f" (__x)); \ return __result; \ } \ \ @@ -333,7 +333,7 @@ __m81_inline void \ __m81_u(__CONCAT(__sincos,s))(float_type __x, float_type *__sinx, \ float_type *__cosx) \ { \ - __asm ("fsincos%.x %2,%1:%0" \ + __asm__ ("fsincos%.x %2,%1:%0" \ : "=f" (*__sinx), "=f" (*__cosx) : "f" (__x)); \ } diff --git a/libc/sysdeps/linux/m68k/brk.c b/libc/sysdeps/linux/m68k/brk.c index cad5976de..7daf1bd76 100644 --- a/libc/sysdeps/linux/m68k/brk.c +++ b/libc/sysdeps/linux/m68k/brk.c @@ -18,7 +18,7 @@ int brk (void *addr) { void *newbrk; - __asm__ volatile ("movel %2,%/d1\n\t" + __asm__ __volatile__ ("movel %2,%/d1\n\t" "moveq %1,%/d0\n\t" "trap #0\n\t" "movel %/d0,%0" diff --git a/libc/sysdeps/linux/m68k/fpu_control.h b/libc/sysdeps/linux/m68k/fpu_control.h index 484bad3b9..040e62c94 100644 --- a/libc/sysdeps/linux/m68k/fpu_control.h +++ b/libc/sysdeps/linux/m68k/fpu_control.h @@ -93,7 +93,7 @@ typedef unsigned int fpu_control_t __attribute__ ((__mode__ (__SI__))); /* Macros for accessing the hardware control word. */ #define _FPU_GETCW(cw) __asm__ ("fmove%.l %!, %0" : "=dm" (cw)) -#define _FPU_SETCW(cw) __asm__ volatile ("fmove%.l %0, %!" : : "dm" (cw)) +#define _FPU_SETCW(cw) __asm__ __volatile__ ("fmove%.l %0, %!" : : "dm" (cw)) #if 0 /* Default control word set at startup. 
*/ diff --git a/libc/sysdeps/linux/microblaze/clone.c b/libc/sysdeps/linux/microblaze/clone.c index f82cd9dc0..887e2c8ac 100644 --- a/libc/sysdeps/linux/microblaze/clone.c +++ b/libc/sysdeps/linux/microblaze/clone.c @@ -19,19 +19,19 @@ int clone (int (*fn)(void *arg), void *child_stack, int flags, void *arg) { - register unsigned long rval asm (SYSCALL_RET) = -EINVAL; + register unsigned long rval __asm__ (SYSCALL_RET) = -EINVAL; if (fn && child_stack) { - register unsigned long syscall asm (SYSCALL_NUM); - register unsigned long arg0 asm (SYSCALL_ARG0); - register unsigned long arg1 asm (SYSCALL_ARG1); + register unsigned long syscall __asm__ (SYSCALL_NUM); + register unsigned long arg0 __asm__ (SYSCALL_ARG0); + register unsigned long arg1 __asm__ (SYSCALL_ARG1); /* Clone this thread. */ arg0 = flags; arg1 = (unsigned long)child_stack; syscall = __NR_clone; - asm volatile ("bralid r17, trap;nop;" + __asm__ __volatile__ ("bralid r17, trap;nop;" : "=r" (rval), "=r" (syscall) : "1" (syscall), "r" (arg0), "r" (arg1) : SYSCALL_CLOBBERS); @@ -41,7 +41,7 @@ clone (int (*fn)(void *arg), void *child_stack, int flags, void *arg) { arg0 = (*fn) (arg); syscall = __NR_exit; - asm volatile ("bralid r17, trap;nop;" + __asm__ __volatile__ ("bralid r17, trap;nop;" : "=r" (rval), "=r" (syscall) : "1" (syscall), "r" (arg0) : SYSCALL_CLOBBERS); diff --git a/libc/sysdeps/linux/microblaze/syscall.c b/libc/sysdeps/linux/microblaze/syscall.c index be628a878..a7fd3ae7b 100644 --- a/libc/sysdeps/linux/microblaze/syscall.c +++ b/libc/sysdeps/linux/microblaze/syscall.c @@ -26,18 +26,18 @@ syscall (long num, arg_t a1, arg_t a2, arg_t a3, arg_t a4, arg_t a5, arg_t a6) off the stack even for (the majority of) system calls with fewer arguments; hopefully this won't cause any problems. A1-A4 are in registers, so they're OK. */ - register arg_t a asm (SYSCALL_ARG0) = a1; - register arg_t b asm (SYSCALL_ARG1) = a2; - register arg_t c asm (SYSCALL_ARG2) = a3; - register arg_t d asm (SYSCALL_ARG3) = a4; - register arg_t e asm (SYSCALL_ARG4) = a5; - register arg_t f asm (SYSCALL_ARG5) = a6; - register unsigned long syscall asm (SYSCALL_NUM) = num; - register unsigned long ret asm (SYSCALL_RET); + register arg_t a __asm__ (SYSCALL_ARG0) = a1; + register arg_t b __asm__ (SYSCALL_ARG1) = a2; + register arg_t c __asm__ (SYSCALL_ARG2) = a3; + register arg_t d __asm__ (SYSCALL_ARG3) = a4; + register arg_t e __asm__ (SYSCALL_ARG4) = a5; + register arg_t f __asm__ (SYSCALL_ARG5) = a6; + register unsigned long syscall __asm__ (SYSCALL_NUM) = num; + register unsigned long ret __asm__ (SYSCALL_RET); unsigned long ret_sav; *((unsigned long *)0xFFFF4004) = (unsigned int)('+'); - asm ("brlid r17, 08x; nop;" + __asm__ ("brlid r17, 08x; nop;" : "=r" (ret) : "r" (syscall), "r" (a), "r" (b), "r" (c), "r" (d), "r" (e), "r" (f) : SYSCALL_CLOBBERS); diff --git a/libc/sysdeps/linux/mips/brk.c b/libc/sysdeps/linux/mips/brk.c index d98aec6e0..36620b210 100644 --- a/libc/sysdeps/linux/mips/brk.c +++ b/libc/sysdeps/linux/mips/brk.c @@ -31,7 +31,7 @@ int brk (void *addr) { register long int res __asm__ ("$2"); - asm ("move\t$4,%2\n\t" + __asm__ ("move\t$4,%2\n\t" "li\t%0,%1\n\t" "syscall" /* Perform the system call. 
*/ : "=r" (res) diff --git a/libc/sysdeps/linux/mips/setjmp_aux.c b/libc/sysdeps/linux/mips/setjmp_aux.c index 7158f87a9..751b32d7d 100644 --- a/libc/sysdeps/linux/mips/setjmp_aux.c +++ b/libc/sysdeps/linux/mips/setjmp_aux.c @@ -39,29 +39,29 @@ __sigsetjmp_aux (jmp_buf env, int savemask, int sp, int fp) #if defined __UCLIBC_HAS_FLOATS__ && ! defined __UCLIBC_HAS_SOFT_FLOAT__ /* Store the floating point callee-saved registers... */ #if _MIPS_SIM == _MIPS_SIM_ABI64 - asm volatile ("s.d $f24, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[0])); - asm volatile ("s.d $f25, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[1])); - asm volatile ("s.d $f26, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[2])); - asm volatile ("s.d $f27, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[3])); - asm volatile ("s.d $f28, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[4])); - asm volatile ("s.d $f29, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[5])); - asm volatile ("s.d $f30, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[6])); - asm volatile ("s.d $f31, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[7])); + __asm__ __volatile__ ("s.d $f24, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[0])); + __asm__ __volatile__ ("s.d $f25, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[1])); + __asm__ __volatile__ ("s.d $f26, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[2])); + __asm__ __volatile__ ("s.d $f27, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[3])); + __asm__ __volatile__ ("s.d $f28, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[4])); + __asm__ __volatile__ ("s.d $f29, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[5])); + __asm__ __volatile__ ("s.d $f30, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[6])); + __asm__ __volatile__ ("s.d $f31, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[7])); #else /* O32 || N32 */ - asm volatile ("s.d $f20, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[0])); - asm volatile ("s.d $f22, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[1])); - asm volatile ("s.d $f24, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[2])); - asm volatile ("s.d $f26, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[3])); - asm volatile ("s.d $f28, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[4])); - asm volatile ("s.d $f30, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[5])); + __asm__ __volatile__ ("s.d $f20, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[0])); + __asm__ __volatile__ ("s.d $f22, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[1])); + __asm__ __volatile__ ("s.d $f24, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[2])); + __asm__ __volatile__ ("s.d $f26, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[3])); + __asm__ __volatile__ ("s.d $f28, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[4])); + __asm__ __volatile__ ("s.d $f30, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[5])); #endif /* O32 || N32 */ #endif /* .. and the PC; */ #if _MIPS_SIM == _MIPS_SIM_ABI64 - asm volatile ("sd $31, %0" : : "m" (env[0].__jmpbuf[0].__pc)); + __asm__ __volatile__ ("sd $31, %0" : : "m" (env[0].__jmpbuf[0].__pc)); #else - asm volatile ("sw $31, %0" : : "m" (env[0].__jmpbuf[0].__pc)); + __asm__ __volatile__ ("sw $31, %0" : : "m" (env[0].__jmpbuf[0].__pc)); #endif /* .. and the stack pointer; */ @@ -72,35 +72,35 @@ __sigsetjmp_aux (jmp_buf env, int savemask, int sp, int fp) /* .. and the GP; */ #if _MIPS_SIM == _MIPS_SIM_ABI64 - asm volatile ("sd $gp, %0" : : "m" (env[0].__jmpbuf[0].__gp)); + __asm__ __volatile__ ("sd $gp, %0" : : "m" (env[0].__jmpbuf[0].__gp)); #else - asm volatile ("sw $gp, %0" : : "m" (env[0].__jmpbuf[0].__gp)); + __asm__ __volatile__ ("sw $gp, %0" : : "m" (env[0].__jmpbuf[0].__gp)); #endif /* .. 
and the callee-saved registers; */ #if (_MIPS_SIM == _MIPS_SIM_ABI32) - asm volatile ("sw $16, %0" : : "m" (env[0].__jmpbuf[0].__regs[0])); - asm volatile ("sw $17, %0" : : "m" (env[0].__jmpbuf[0].__regs[1])); - asm volatile ("sw $18, %0" : : "m" (env[0].__jmpbuf[0].__regs[2])); - asm volatile ("sw $19, %0" : : "m" (env[0].__jmpbuf[0].__regs[3])); - asm volatile ("sw $20, %0" : : "m" (env[0].__jmpbuf[0].__regs[4])); - asm volatile ("sw $21, %0" : : "m" (env[0].__jmpbuf[0].__regs[5])); - asm volatile ("sw $22, %0" : : "m" (env[0].__jmpbuf[0].__regs[6])); - asm volatile ("sw $23, %0" : : "m" (env[0].__jmpbuf[0].__regs[7])); + __asm__ __volatile__ ("sw $16, %0" : : "m" (env[0].__jmpbuf[0].__regs[0])); + __asm__ __volatile__ ("sw $17, %0" : : "m" (env[0].__jmpbuf[0].__regs[1])); + __asm__ __volatile__ ("sw $18, %0" : : "m" (env[0].__jmpbuf[0].__regs[2])); + __asm__ __volatile__ ("sw $19, %0" : : "m" (env[0].__jmpbuf[0].__regs[3])); + __asm__ __volatile__ ("sw $20, %0" : : "m" (env[0].__jmpbuf[0].__regs[4])); + __asm__ __volatile__ ("sw $21, %0" : : "m" (env[0].__jmpbuf[0].__regs[5])); + __asm__ __volatile__ ("sw $22, %0" : : "m" (env[0].__jmpbuf[0].__regs[6])); + __asm__ __volatile__ ("sw $23, %0" : : "m" (env[0].__jmpbuf[0].__regs[7])); #else /* N32 || N64 */ - asm volatile ("sd $16, %0" : : "m" (env[0].__jmpbuf[0].__regs[0])); - asm volatile ("sd $17, %0" : : "m" (env[0].__jmpbuf[0].__regs[1])); - asm volatile ("sd $18, %0" : : "m" (env[0].__jmpbuf[0].__regs[2])); - asm volatile ("sd $19, %0" : : "m" (env[0].__jmpbuf[0].__regs[3])); - asm volatile ("sd $20, %0" : : "m" (env[0].__jmpbuf[0].__regs[4])); - asm volatile ("sd $21, %0" : : "m" (env[0].__jmpbuf[0].__regs[5])); - asm volatile ("sd $22, %0" : : "m" (env[0].__jmpbuf[0].__regs[6])); - asm volatile ("sd $23, %0" : : "m" (env[0].__jmpbuf[0].__regs[7])); + __asm__ __volatile__ ("sd $16, %0" : : "m" (env[0].__jmpbuf[0].__regs[0])); + __asm__ __volatile__ ("sd $17, %0" : : "m" (env[0].__jmpbuf[0].__regs[1])); + __asm__ __volatile__ ("sd $18, %0" : : "m" (env[0].__jmpbuf[0].__regs[2])); + __asm__ __volatile__ ("sd $19, %0" : : "m" (env[0].__jmpbuf[0].__regs[3])); + __asm__ __volatile__ ("sd $20, %0" : : "m" (env[0].__jmpbuf[0].__regs[4])); + __asm__ __volatile__ ("sd $21, %0" : : "m" (env[0].__jmpbuf[0].__regs[5])); + __asm__ __volatile__ ("sd $22, %0" : : "m" (env[0].__jmpbuf[0].__regs[6])); + __asm__ __volatile__ ("sd $23, %0" : : "m" (env[0].__jmpbuf[0].__regs[7])); #endif /* N32 || N64 */ #if defined __UCLIBC_HAS_FLOATS__ && ! defined __UCLIBC_HAS_SOFT_FLOAT__ /* .. and finally get and reconstruct the floating point csr. */ - asm ("cfc1 %0, $31" : "=r" (env[0].__jmpbuf[0].__fpc_csr)); + __asm__ ("cfc1 %0, $31" : "=r" (env[0].__jmpbuf[0].__fpc_csr)); #endif /* Save the signal mask if requested. 
*/ diff --git a/libc/sysdeps/linux/mips/sigaction.c b/libc/sysdeps/linux/mips/sigaction.c index f4b5167d6..cb819a25f 100644 --- a/libc/sysdeps/linux/mips/sigaction.c +++ b/libc/sysdeps/linux/mips/sigaction.c @@ -36,10 +36,10 @@ libc_hidden_proto(memcpy) #if _MIPS_SIM != _ABIO32 # ifdef __NR_rt_sigreturn -static void restore_rt (void) asm ("__restore_rt"); +static void restore_rt (void) __asm__ ("__restore_rt"); # endif # ifdef __NR_sigreturn -static void restore (void) asm ("__restore"); +static void restore (void) __asm__ ("__restore"); # endif #endif @@ -81,7 +81,7 @@ int __libc_sigaction (int sig, const struct sigaction *act, struct sigaction *oa #else -extern void restore (void) asm ("__restore") attribute_hidden; +extern void restore (void) __asm__ ("__restore") attribute_hidden; /* If ACT is not NULL, change the action for SIG to *ACT. If OACT is not NULL, put the old action for SIG in *OACT. */ diff --git a/libc/sysdeps/linux/nios/brk.c b/libc/sysdeps/linux/nios/brk.c index 8e626a7fb..ea2e45765 100644 --- a/libc/sysdeps/linux/nios/brk.c +++ b/libc/sysdeps/linux/nios/brk.c @@ -28,10 +28,10 @@ libc_hidden_proto(brk) int brk (void *addr) { void *newbrk; - register int g1 asm("%g1") = __NR_brk; - register void *o0 asm("%o0") = addr; + register int g1 __asm__("%g1") = __NR_brk; + register void *o0 __asm__("%o0") = addr; - asm volatile ("trap 63\n\t" : "=r"(newbrk) : "0"(o0), "r"(g1)); + __asm__ __volatile__ ("trap 63\n\t" : "=r"(newbrk) : "0"(o0), "r"(g1)); __curbrk = newbrk; diff --git a/libc/sysdeps/linux/nios2/brk.c b/libc/sysdeps/linux/nios2/brk.c index 8d6288793..0420798bc 100644 --- a/libc/sysdeps/linux/nios2/brk.c +++ b/libc/sysdeps/linux/nios2/brk.c @@ -28,11 +28,11 @@ libc_hidden_proto(brk) int brk (void *addr) { void *newbrk; - register int r2 asm("r2") = TRAP_ID_SYSCALL; - register int r3 asm("r3") = __NR_brk; - register void *r4 asm("r4") = addr; + register int r2 __asm__("r2") = TRAP_ID_SYSCALL; + register int r3 __asm__("r3") = __NR_brk; + register void *r4 __asm__("r4") = addr; - asm volatile ("trap\n\t" : "=r"(newbrk) : "0"(r2), "r"(r3), "r"(r4)); + __asm__ __volatile__ ("trap\n\t" : "=r"(newbrk) : "0"(r2), "r"(r3), "r"(r4)); __curbrk = newbrk; diff --git a/libc/sysdeps/linux/nios2/clone.c b/libc/sysdeps/linux/nios2/clone.c index 78dcac1af..eec9f42ff 100644 --- a/libc/sysdeps/linux/nios2/clone.c +++ b/libc/sysdeps/linux/nios2/clone.c @@ -19,19 +19,19 @@ int clone (int (*fn)(void *arg), void *child_stack, int flags, void *arg, ...) { - register unsigned long rval asm ("r2") = -EINVAL; + register unsigned long rval __asm__ ("r2") = -EINVAL; if (fn && child_stack) { - register unsigned long syscall asm ("r3"); - register unsigned long arg0 asm ("r4"); - register unsigned long arg1 asm ("r5"); + register unsigned long syscall __asm__ ("r3"); + register unsigned long arg0 __asm__ ("r4"); + register unsigned long arg1 __asm__ ("r5"); /* Clone this thread. */ rval = TRAP_ID_SYSCALL; syscall = __NR_clone; arg0 = flags; arg1 = (unsigned long)child_stack; - asm volatile ("trap " + __asm__ __volatile__ ("trap " : "=r" (rval), "=r" (syscall) : "0" (rval),"1" (syscall), "r" (arg0), "r" (arg1) ); @@ -40,7 +40,7 @@ int clone (int (*fn)(void *arg), void *child_stack, int flags, void *arg, ...) /* In child thread, call fn and exit. 
*/ arg0 = (*fn) (arg); syscall = __NR_exit; - asm volatile ("trap " + __asm__ __volatile__ ("trap " : "=r" (rval), "=r" (syscall) : "1" (syscall), "r" (arg0)); } diff --git a/libc/sysdeps/linux/nios2/syscall.c b/libc/sysdeps/linux/nios2/syscall.c index c6cce215a..df925fde8 100644 --- a/libc/sysdeps/linux/nios2/syscall.c +++ b/libc/sysdeps/linux/nios2/syscall.c @@ -26,16 +26,16 @@ long syscall(long sysnum, long a, long b, long c, long d, long e, long f) { - register long _r2 asm("r2")=(long)TRAP_ID_SYSCALL; - register long _r3 asm("r3")=(long)sysnum; + register long _r2 __asm__("r2")=(long)TRAP_ID_SYSCALL; + register long _r3 __asm__("r3")=(long)sysnum; - register long _r4 asm("r4")=(long)(a); - register long _r5 asm("r5")=(long)(b); - register long _r6 asm("r6")=(long)(c); - register long _r7 asm("r7")=(long)(d); - register long _r8 asm("r8")=(long)(e); - register long _r9 asm("r9")=(long)(f); - asm volatile( + register long _r4 __asm__("r4")=(long)(a); + register long _r5 __asm__("r5")=(long)(b); + register long _r6 __asm__("r6")=(long)(c); + register long _r7 __asm__("r7")=(long)(d); + register long _r8 __asm__("r8")=(long)(e); + register long _r9 __asm__("r9")=(long)(f); + __asm__ __volatile__( "trap " : "=r"(_r2), "=r"(_r3) : "0"(_r2), "1"(_r3), diff --git a/libc/sysdeps/linux/powerpc/bits/atomic.h b/libc/sysdeps/linux/powerpc/bits/atomic.h index 447195538..977bda72f 100644 --- a/libc/sysdeps/linux/powerpc/bits/atomic.h +++ b/libc/sysdeps/linux/powerpc/bits/atomic.h @@ -235,7 +235,7 @@ /* * All powerpc64 processors support the new "light weight" sync (lwsync). */ -# define atomic_read_barrier() __asm ("lwsync" ::: "memory") +# define atomic_read_barrier() __asm__ ("lwsync" ::: "memory") /* * "light weight" sync can also be used for the release barrier. */ @@ -340,7 +340,7 @@ * sync (lwsync). So the only safe option is to use normal sync * for all powerpc32 applications. */ -# define atomic_read_barrier() __asm ("sync" ::: "memory") +# define atomic_read_barrier() __asm__ ("sync" ::: "memory") #endif @@ -387,8 +387,8 @@ typedef uintmax_t uatomic_max_t; # endif #endif -#define atomic_full_barrier() __asm ("sync" ::: "memory") -#define atomic_write_barrier() __asm ("eieio" ::: "memory") +#define atomic_full_barrier() __asm__ ("sync" ::: "memory") +#define atomic_write_barrier() __asm__ ("eieio" ::: "memory") #define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ ({ \ diff --git a/libc/sysdeps/linux/powerpc/fpu_control.h b/libc/sysdeps/linux/powerpc/fpu_control.h index 8a906ecaa..442da4721 100644 --- a/libc/sysdeps/linux/powerpc/fpu_control.h +++ b/libc/sysdeps/linux/powerpc/fpu_control.h @@ -57,11 +57,11 @@ typedef unsigned int fpu_control_t __attribute__ ((__mode__ (__SI__))); /* Macros for accessing the hardware control word. */ #define _FPU_GETCW(__cw) ({ \ unsigned int env; \ - asm volatile ("mfspefscr %0" : "=r" (env)); \ + __asm__ __volatile__ ("mfspefscr %0" : "=r" (env)); \ (__cw) = env; }) #define _FPU_SETCW(__cw) ({ \ unsigned int env = __cw; \ - asm volatile ("mtspefscr %0" : : "r" (env)); }) + __asm__ __volatile__ ("mtspefscr %0" : : "r" (env)); }) #else #define _FPU_RESERVED 0xffffff00 /* These bits are reserved are not changed. 
*/ /* IEEE: same as above, but (some) exceptions; diff --git a/libc/sysdeps/linux/sparc/bits/mathinline.h b/libc/sysdeps/linux/sparc/bits/mathinline.h index 9dd784d12..729145e14 100644 --- a/libc/sysdeps/linux/sparc/bits/mathinline.h +++ b/libc/sysdeps/linux/sparc/bits/mathinline.h @@ -198,7 +198,7 @@ __MATH_INLINE double __NTH (sqrt (double __x)) { register double __r; - __asm ("fsqrtd %1,%0" : "=f" (__r) : "f" (__x)); + __asm__ ("fsqrtd %1,%0" : "=f" (__r) : "f" (__x)); return __r; } @@ -206,7 +206,7 @@ __MATH_INLINE float __NTH (sqrtf (float __x)) { register float __r; - __asm ("fsqrts %1,%0" : "=f" (__r) : "f" (__x)); + __asm__ ("fsqrts %1,%0" : "=f" (__r) : "f" (__x)); return __r; } @@ -236,7 +236,7 @@ __MATH_INLINE double __ieee754_sqrt (double __x) { register double __r; - __asm ("fsqrtd %1,%0" : "=f" (__r) : "f" (__x)); + __asm__ ("fsqrtd %1,%0" : "=f" (__r) : "f" (__x)); return __r; } @@ -244,7 +244,7 @@ __MATH_INLINE float __ieee754_sqrtf (float __x) { register float __r; - __asm ("fsqrts %1,%0" : "=f" (__r) : "f" (__x)); + __asm__ ("fsqrts %1,%0" : "=f" (__r) : "f" (__x)); return __r; } diff --git a/libc/sysdeps/linux/sparc/brk.c b/libc/sysdeps/linux/sparc/brk.c index 7791f55e5..53f2c9c1d 100644 --- a/libc/sysdeps/linux/sparc/brk.c +++ b/libc/sysdeps/linux/sparc/brk.c @@ -33,7 +33,7 @@ int brk (void *addr) { register void *o0 __asm__("%o0") = addr; register int g1 __asm__("%g1") = 17 ; - __asm ("t 0x10" : "=r"(o0) : "r"(g1), "0"(o0) : "cc"); + __asm__ ("t 0x10" : "=r"(o0) : "r"(g1), "0"(o0) : "cc"); newbrk = o0; } diff --git a/libc/sysdeps/linux/v850/clone.c b/libc/sysdeps/linux/v850/clone.c index 515981f4e..d2e220823 100644 --- a/libc/sysdeps/linux/v850/clone.c +++ b/libc/sysdeps/linux/v850/clone.c @@ -17,19 +17,19 @@ int clone (int (*fn)(void *arg), void *child_stack, int flags, void *arg) { - register unsigned long rval asm (SYSCALL_RET) = -EINVAL; + register unsigned long rval __asm__ (SYSCALL_RET) = -EINVAL; if (fn && child_stack) { - register unsigned long syscall asm (SYSCALL_NUM); - register unsigned long arg0 asm (SYSCALL_ARG0); - register unsigned long arg1 asm (SYSCALL_ARG1); + register unsigned long syscall __asm__ (SYSCALL_NUM); + register unsigned long arg0 __asm__ (SYSCALL_ARG0); + register unsigned long arg1 __asm__ (SYSCALL_ARG1); /* Clone this thread. */ arg0 = flags; arg1 = (unsigned long)child_stack; syscall = __NR_clone; - asm volatile ("trap " SYSCALL_SHORT_TRAP + __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP : "=r" (rval), "=r" (syscall) : "1" (syscall), "r" (arg0), "r" (arg1) : SYSCALL_SHORT_CLOBBERS); @@ -39,7 +39,7 @@ clone (int (*fn)(void *arg), void *child_stack, int flags, void *arg) { arg0 = (*fn) (arg); syscall = __NR_exit; - asm volatile ("trap " SYSCALL_SHORT_TRAP + __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP : "=r" (rval), "=r" (syscall) : "1" (syscall), "r" (arg0) : SYSCALL_SHORT_CLOBBERS); diff --git a/libc/sysdeps/linux/v850/syscall.c b/libc/sysdeps/linux/v850/syscall.c index 2b7d67b4a..614d874f5 100644 --- a/libc/sysdeps/linux/v850/syscall.c +++ b/libc/sysdeps/linux/v850/syscall.c @@ -25,16 +25,16 @@ syscall (long num, arg_t a1, arg_t a2, arg_t a3, arg_t a4, arg_t a5, arg_t a6) off the stack even for (the majority of) system calls with fewer arguments; hopefully this won't cause any problems. A1-A4 are in registers, so they're OK. 
*/ - register arg_t a asm (SYSCALL_ARG0) = a1; - register arg_t b asm (SYSCALL_ARG1) = a2; - register arg_t c asm (SYSCALL_ARG2) = a3; - register arg_t d asm (SYSCALL_ARG3) = a4; - register arg_t e asm (SYSCALL_ARG4) = a5; - register arg_t f asm (SYSCALL_ARG5) = a6; - register unsigned long syscall asm (SYSCALL_NUM) = num; - register unsigned long ret asm (SYSCALL_RET); + register arg_t a __asm__ (SYSCALL_ARG0) = a1; + register arg_t b __asm__ (SYSCALL_ARG1) = a2; + register arg_t c __asm__ (SYSCALL_ARG2) = a3; + register arg_t d __asm__ (SYSCALL_ARG3) = a4; + register arg_t e __asm__ (SYSCALL_ARG4) = a5; + register arg_t f __asm__ (SYSCALL_ARG5) = a6; + register unsigned long syscall __asm__ (SYSCALL_NUM) = num; + register unsigned long ret __asm__ (SYSCALL_RET); - asm ("trap " SYSCALL_LONG_TRAP + __asm__ ("trap " SYSCALL_LONG_TRAP : "=r" (ret) : "r" (syscall), "r" (a), "r" (b), "r" (c), "r" (d), "r" (e), "r" (f) : SYSCALL_CLOBBERS); diff --git a/libc/sysdeps/linux/x86_64/bits/atomic.h b/libc/sysdeps/linux/x86_64/bits/atomic.h index 133a68d19..67a512568 100644 --- a/libc/sysdeps/linux/x86_64/bits/atomic.h +++ b/libc/sysdeps/linux/x86_64/bits/atomic.h @@ -321,4 +321,4 @@ typedef uintmax_t uatomic_max_t; __result; }) -#define atomic_delay() asm ("rep; nop") +#define atomic_delay() __asm__ ("rep; nop") diff --git a/libc/sysdeps/linux/x86_64/bits/syscalls.h b/libc/sysdeps/linux/x86_64/bits/syscalls.h index d31304430..7431d6d7f 100644 --- a/libc/sysdeps/linux/x86_64/bits/syscalls.h +++ b/libc/sysdeps/linux/x86_64/bits/syscalls.h @@ -141,7 +141,7 @@ return (type) (INLINE_SYSCALL(name, 6, arg1, arg2, arg3, arg4, arg5, arg6)); \ unsigned long resultvar; \ LOAD_ARGS_##nr (args) \ LOAD_REGS_##nr \ - asm volatile ( \ + __asm__ __volatile__ ( \ "syscall\n\t" \ : "=a" (resultvar) \ : "0" (name) ASM_ARGS_##nr : "memory", "cc", "r11", "cx"); \ @@ -165,7 +165,7 @@ return (type) (INLINE_SYSCALL(name, 6, arg1, arg2, arg3, arg4, arg5, arg6)); \ long int __arg1 = (long) (a1); \ LOAD_ARGS_0 () #define LOAD_REGS_1 \ - register long int _a1 asm ("rdi") = __arg1; \ + register long int _a1 __asm__ ("rdi") = __arg1; \ LOAD_REGS_0 #define ASM_ARGS_1 ASM_ARGS_0, "r" (_a1) @@ -173,7 +173,7 @@ return (type) (INLINE_SYSCALL(name, 6, arg1, arg2, arg3, arg4, arg5, arg6)); \ long int __arg2 = (long) (a2); \ LOAD_ARGS_1 (a1) #define LOAD_REGS_2 \ - register long int _a2 asm ("rsi") = __arg2; \ + register long int _a2 __asm__ ("rsi") = __arg2; \ LOAD_REGS_1 #define ASM_ARGS_2 ASM_ARGS_1, "r" (_a2) @@ -181,7 +181,7 @@ return (type) (INLINE_SYSCALL(name, 6, arg1, arg2, arg3, arg4, arg5, arg6)); \ long int __arg3 = (long) (a3); \ LOAD_ARGS_2 (a1, a2) #define LOAD_REGS_3 \ - register long int _a3 asm ("rdx") = __arg3; \ + register long int _a3 __asm__ ("rdx") = __arg3; \ LOAD_REGS_2 #define ASM_ARGS_3 ASM_ARGS_2, "r" (_a3) @@ -189,7 +189,7 @@ return (type) (INLINE_SYSCALL(name, 6, arg1, arg2, arg3, arg4, arg5, arg6)); \ long int __arg4 = (long) (a4); \ LOAD_ARGS_3 (a1, a2, a3) #define LOAD_REGS_4 \ - register long int _a4 asm ("r10") = __arg4; \ + register long int _a4 __asm__ ("r10") = __arg4; \ LOAD_REGS_3 #define ASM_ARGS_4 ASM_ARGS_3, "r" (_a4) @@ -197,7 +197,7 @@ return (type) (INLINE_SYSCALL(name, 6, arg1, arg2, arg3, arg4, arg5, arg6)); \ long int __arg5 = (long) (a5); \ LOAD_ARGS_4 (a1, a2, a3, a4) #define LOAD_REGS_5 \ - register long int _a5 asm ("r8") = __arg5; \ + register long int _a5 __asm__ ("r8") = __arg5; \ LOAD_REGS_4 #define ASM_ARGS_5 ASM_ARGS_4, "r" (_a5) @@ -205,7 +205,7 @@ return (type) 
(INLINE_SYSCALL(name, 6, arg1, arg2, arg3, arg4, arg5, arg6)); \ long int __arg6 = (long) (a6); \ LOAD_ARGS_5 (a1, a2, a3, a4, a5) #define LOAD_REGS_6 \ - register long int _a6 asm ("r9") = __arg6; \ + register long int _a6 __asm__ ("r9") = __arg6; \ LOAD_REGS_5 #define ASM_ARGS_6 ASM_ARGS_5, "r" (_a6) diff --git a/libc/sysdeps/linux/x86_64/brk.c b/libc/sysdeps/linux/x86_64/brk.c index 22f006d7e..eddfd9830 100644 --- a/libc/sysdeps/linux/x86_64/brk.c +++ b/libc/sysdeps/linux/x86_64/brk.c @@ -29,7 +29,7 @@ int brk (void *addr) { void *__unbounded newbrk; - asm ("syscall\n" + __asm__ ("syscall\n" : "=a" (newbrk) : "0" (__NR_brk), "D" (__ptrvalue (addr)) : "r11","rcx","memory"); diff --git a/libc/sysdeps/linux/x86_64/sigaction.c b/libc/sysdeps/linux/x86_64/sigaction.c index 8d9cbaaae..2812de169 100644 --- a/libc/sysdeps/linux/x86_64/sigaction.c +++ b/libc/sysdeps/linux/x86_64/sigaction.c @@ -38,8 +38,8 @@ extern __typeof(sigaction) __libc_sigaction; #ifdef __NR_rt_sigaction /* Using the hidden attribute here does not change the code but it helps to avoid warnings. */ -extern void restore_rt (void) asm ("__restore_rt") attribute_hidden; -extern void restore (void) asm ("__restore") attribute_hidden; +extern void restore_rt (void) __asm__ ("__restore_rt") attribute_hidden; +extern void restore (void) __asm__ ("__restore") attribute_hidden; libc_hidden_proto(memcpy) @@ -74,7 +74,7 @@ __libc_sigaction (int sig, const struct sigaction *act, struct sigaction *oact) } #else -extern void restore (void) asm ("__restore") attribute_hidden; +extern void restore (void) __asm__ ("__restore") attribute_hidden; /* If ACT is not NULL, change the action for SIG to *ACT. If OACT is not NULL, put the old action for SIG in *OACT. */ @@ -98,7 +98,7 @@ __libc_sigaction (int sig, const struct sigaction *act, struct sigaction *oact) kact.sa_restorer = &restore; } - asm volatile ("syscall\n" + __asm__ __volatile__ ("syscall\n" : "=a" (result) : "0" (__NR_sigaction), "mr" (sig), "c" (act ? __ptrvalue (&kact) : 0), diff --git a/libc/sysdeps/linux/xtensa/bits/syscalls.h b/libc/sysdeps/linux/xtensa/bits/syscalls.h index 76bcf404f..e0506e4cc 100644 --- a/libc/sysdeps/linux/xtensa/bits/syscalls.h +++ b/libc/sysdeps/linux/xtensa/bits/syscalls.h @@ -53,7 +53,7 @@ #include <errno.h> #define STR(s) #s -#define LD_ARG(n,ar) register int _a##n asm (STR(a##n)) = (int) (ar) +#define LD_ARG(n,ar) register int _a##n __asm__ (STR(a##n)) = (int) (ar) #define LD_ARGS_0() #define LD_ARGS_1(a0) LD_ARG(6,a0) @@ -90,7 +90,7 @@ #define INTERNAL_SYSCALL_NCS(name, err, nr, args...) \ ({ LD_ARG(2, name); \ LD_ARGS_##nr(args); \ - asm volatile ("syscall\n" \ + __asm__ __volatile__ ("syscall\n" \ : "=a" (_a2) \ : ASM_ARGS_##nr \ : "memory"); \ |
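The same spelling fix also covers register-bound variables in the syscall macros above (for example `register long _r8 asm ("r8")` becomes `__asm__ ("r8")`). A minimal sketch of that idiom, assuming x86-64 Linux; the register names and the syscall number 39 (getpid) are illustrative and not taken from this commit:

```c
/* Sketch of the register-binding idiom used by the INTERNAL_SYSCALL-style
 * macros above, spelled with __asm__/__volatile__; x86-64 Linux assumed. */
#include <stdio.h>

static long my_getpid(void)
{
    register long ret __asm__ ("rax") = 39;   /* rax holds the syscall number going in */

    __asm__ __volatile__ ("syscall"
                          : "+r" (ret)        /* rax: number in, return value out */
                          :
                          : "rcx", "r11", "memory");  /* syscall clobbers rcx/r11 */
    return ret;
}

int main(void)
{
    printf("pid: %ld\n", my_getpid());
    return 0;
}
```

Binding the variable with `__asm__ ("rax")` lets the wrapper hand the value to the kernel without an explicit move; the hppa, ia64, microblaze, nios2 and x86_64 hunks above keep exactly that pattern and change only the keyword spelling.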