author     Khem Raj <raj.khem@gmail.com>   2009-09-06 01:02:02 -0700
committer  Khem Raj <raj.khem@gmail.com>   2009-09-06 01:02:50 -0700
commit     3e151bdde55f608385099d761a22c2e8951448b0
tree       eb5cd670707f50fe7050793ab977c94d2924d899 /libc/sysdeps/linux/arm
parent     6943b8336ed253a2ad90816f8d16aa392d0de606

Revert "syscall.c: Use common syscall.c for ARM"
This reverts commit b1913a876059949e6c309bafade55e9425ef33fb.
OABI is still a requirement, so we override the common syscall
implementation specifically for the ARM port.
Signed-off-by: Khem Raj <raj.khem@gmail.com>
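
For context, both files restored below implement the public syscall() entry point that libc exports. A minimal usage sketch follows (not part of this commit); it assumes a Linux target where SYS_gettid is available from <sys/syscall.h>, and note that 64-bit arguments cannot be passed reliably through this interface:

/* Invoke a raw system call through the libc syscall() wrapper. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	/* gettid has no dedicated wrapper in older libcs, so syscall()
	 * is the usual way to reach it. */
	long tid = syscall(SYS_gettid);
	if (tid == -1)
		perror("syscall");
	else
		printf("gettid() via syscall(): %ld\n", tid);
	return 0;
}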
Diffstat (limited to 'libc/sysdeps/linux/arm')
-rw-r--r--  libc/sysdeps/linux/arm/Makefile.arch  |  3
-rw-r--r--  libc/sysdeps/linux/arm/syscall-eabi.S | 73
-rw-r--r--  libc/sysdeps/linux/arm/syscall.c      | 53
3 files changed, 129 insertions, 0 deletions
diff --git a/libc/sysdeps/linux/arm/Makefile.arch b/libc/sysdeps/linux/arm/Makefile.arch
index 76f617997..e7296d3d9 100644
--- a/libc/sysdeps/linux/arm/Makefile.arch
+++ b/libc/sysdeps/linux/arm/Makefile.arch
@@ -19,9 +19,12 @@ ifeq ($(CONFIG_ARM_EABI),y)
 CSRC += aeabi_assert.c aeabi_atexit.c aeabi_errno_addr.c \
 	aeabi_localeconv.c aeabi_memclr.c aeabi_memcpy.c \
 	aeabi_memmove.c aeabi_memset.c find_exidx.c
+SSRC += syscall-eabi.S
 ifeq ($(UCLIBC_HAS_WCHAR),y)
 CSRC += aeabi_mb_cur_max.c
 endif
+else
+CSRC += syscall.c
 endif
 
 ifeq ($(CONFIG_ARM_EABI),y)
diff --git a/libc/sysdeps/linux/arm/syscall-eabi.S b/libc/sysdeps/linux/arm/syscall-eabi.S
new file mode 100644
index 000000000..b9318821b
--- /dev/null
+++ b/libc/sysdeps/linux/arm/syscall-eabi.S
@@ -0,0 +1,73 @@
+/* Copyright (C) 2005 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sys/syscall.h>
+#include <bits/arm_asm.h>
+
+/* In the EABI syscall interface, we don't need a special syscall to
+   implement syscall().  It won't work reliably with 64-bit arguments
+   (but that is true on many modern platforms).  */
+
+.text
+.global syscall
+.type syscall,%function
+.align 4
+#if defined(THUMB1_ONLY)
+.thumb_func
+syscall:
+	push	{r4, r5, r6, r7}
+	mov	ip, r0
+	mov	r0, r1
+	mov	r1, r2
+	mov	r2, r3
+	add	r7, sp, #(4 * 4)
+	ldmia	r7!, {r3, r4, r5, r6}
+	mov	r7, ip
+	swi	0x0
+	pop	{r4, r5, r6, r7}
+	ldr	r1, =0xfffff000
+	cmp	r0, r1
+	bcs	1f
+	bx	lr
+1:
+	push	{r3, lr}
+	bl	__syscall_error
+	POP_RET
+.pool
+#else
+syscall:
+	mov	ip, sp
+	stmfd	sp!, {r4, r5, r6, r7}
+	mov	r7, r0
+	mov	r0, r1
+	mov	r1, r2
+	mov	r2, r3
+	ldmfd	ip, {r3, r4, r5, r6}
+	swi	0x0
+	ldmfd	sp!, {r4, r5, r6, r7}
+	cmn	r0, #4096
+	IT(t, cc)
+#if defined(__USE_BX__)
+	bxcc	lr
+#else
+	movcc	pc, lr
+#endif
+	b	__syscall_error
+#endif
+
+.size syscall,.-syscall
diff --git a/libc/sysdeps/linux/arm/syscall.c b/libc/sysdeps/linux/arm/syscall.c
new file mode 100644
index 000000000..60fbcf89b
--- /dev/null
+++ b/libc/sysdeps/linux/arm/syscall.c
@@ -0,0 +1,53 @@
+/* vi: set sw=4 ts=4: */
+/* syscall for arm/uClibc
+ *
+ * Copyright (C) 2002 by Erik Andersen <andersen@uclibc.org>
+ *
+ * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
+ */
+
+#include <features.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/syscall.h>
+
+
+long syscall(long sysnum, long a, long b, long c, long d, long e, long f)
+{
+#if !defined(__thumb__)
+	register long _r0 __asm__("r0")=(long)(sysnum);
+	register long _r6 __asm__("r6")=(long)(f);
+	register long _r5 __asm__("r5")=(long)(e);
+	register long _r4 __asm__("r4")=(long)(d);
+	register long _r3 __asm__("r3")=(long)(c);
+	register long _r2 __asm__("r2")=(long)(b);
+	register long _r1 __asm__("r1")=(long)(a);
+	__asm__ __volatile__(
+			"swi %1"
+			: "=r"(_r0)
+			: "i"(__NR_syscall), "r"(_r0), "r"(_r1),
+			  "r"(_r2), "r"(_r3), "r"(_r4), "r"(_r5),
+			  "r"(_r6)
+			: "memory");
+#else
+	register long _r7 __asm__("r7")=(long)(sysnum);
+	register long _r5 __asm__("r5")=(long)(f);
+	register long _r4 __asm__("r4")=(long)(e);
+	register long _r3 __asm__("r3")=(long)(d);
+	register long _r2 __asm__("r2")=(long)(c);
+	register long _r1 __asm__("r1")=(long)(b);
+	register long _r0 __asm__("r0")=(long)(a);
+	__asm__ __volatile__(
+			"swi 0"
+			: "=r"(_r0)
+			: "r"(_r0), "r"(_r1), "r"(_r2), "r"(_r3),
+			  "r"(_r4), "r"(_r5), "r"(_r7)
+			: "memory");
+#endif
+	if(_r0 >=(unsigned long) -4095) {
+		long err = _r0;
+		(*__errno_location())=(-err);
+		_r0=(unsigned long) -1;
+	}
+	return (long) _r0;
+}
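
Both variants end with the same error-return convention: the kernel leaves -errno in r0 on failure, i.e. any raw return value in the range [-4095, -1] (the assembly checks this with `cmn r0, #4096` before branching to __syscall_error). A hypothetical helper, shown only to restate that final check in C; the name __check_syscall_ret is illustrative and not part of uClibc:

#include <errno.h>

/* Sketch of the post-syscall check done at the end of both
 * implementations: raw values in [-4095, -1] are negated into errno
 * and the call returns -1; anything else is passed through unchanged. */
static long __check_syscall_ret(unsigned long r0)
{
	if (r0 >= (unsigned long) -4095) {	/* kernel returned -errno */
		errno = -(long) r0;
		return -1;
	}
	return (long) r0;
}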