Diffstat (limited to 'libc/sysdeps')
66 files changed, 3724 insertions, 635 deletions
diff --git a/libc/sysdeps/linux/arm/Makefile.arch b/libc/sysdeps/linux/arm/Makefile.arch
index c3c55258a..bec06ff44 100644
--- a/libc/sysdeps/linux/arm/Makefile.arch
+++ b/libc/sysdeps/linux/arm/Makefile.arch
@@ -5,14 +5,24 @@
 # Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
 #
-CSRC := brk.c ioperm.c iopl.c mmap.c sigaction.c __syscall_error.c
+CSRC := brk.c ioperm.c iopl.c mmap.c __syscall_error.c
+
+ifneq ($(UCLIBC_HAS_THREADS_NATIVE),y)
+CSRC += sigaction.c
+endif
 
 SSRC := \
-	__longjmp.S vfork.S clone.S setjmp.S bsd-setjmp.S \
+	__longjmp.S setjmp.S bsd-setjmp.S \
 	bsd-_setjmp.S sigrestorer.S mmap64.S
 
+ifeq ($(UCLIBC_HAS_THREADS_NATIVE),y)
+SSRC += libc-aeabi_read_tp.S libc-thumb_atomics.S
+else
+SSRC += vfork.S clone.S
+endif
+
 ifeq ($(UCLIBC_HAS_ADVANCED_REALTIME),y)
-        CSRC += posix_fadvise.c posix_fadvise64.c
+CSRC += posix_fadvise.c posix_fadvise64.c
 endif
 
 ifeq ($(CONFIG_ARM_EABI),y)
diff --git a/libc/sysdeps/linux/arm/clone.S b/libc/sysdeps/linux/arm/clone.S
index d9483735d..fdc05b88b 100644
--- a/libc/sysdeps/linux/arm/clone.S
+++ b/libc/sysdeps/linux/arm/clone.S
@@ -30,12 +30,12 @@
 
 /* int clone(int (*fn)(void *arg), void *child_stack, int flags, void *arg); */
 
 .text
-.global clone
-.type clone,%function
+.global __clone
+.type __clone,%function
 .align 2
 #if defined(THUMB1_ONLY)
 .thumb_func
-clone:
+__clone:
 	@ sanity check args
 	cmp	r0, #0
 	beq	__einval
@@ -52,9 +52,15 @@ clone:
 	@ get flags
 	mov	r0, r2
 	@ new sp is already in r1
+	@ load remaining arguments off the stack
+	stmfd	sp!, {r4}
+	ldr	r2, [sp, #4]
+	ldr	r3, [sp, #8]
+	ldr	r4, [sp, #12]
 	DO_CALL (clone)
 	movs	a1, a1
 	blt	__error
+	ldmnefd sp!, {r4}
 	beq	1f
 	bx	lr
 1:
@@ -80,7 +86,7 @@ __error:
 	POP_RET
 .pool
 #else
-clone:
+__clone:
 	@ sanity check args
 	cmp	r0, #0
 	IT(te, ne)
@@ -98,9 +104,15 @@ clone:
 	@ get flags
 	mov	r0, r2
 	@ new sp is already in r1
+	@ load remaining arguments off the stack
+	stmfd	sp!, {r4}
+	ldr	r2, [sp, #4]
+	ldr	r3, [sp, #8]
+	ldr	r4, [sp, #12]
 	DO_CALL (clone)
 	movs	a1, a1
 	blt	__error
+	ldmnefd	sp!, {r4}
 	IT(t, ne)
 #if defined(__USE_BX__)
 	bxne	lr
@@ -120,6 +132,7 @@ __error:
 	b	__syscall_error
 #endif
 
-.size clone,.-clone
+.size __clone,.-__clone
+weak_alias(__clone, clone)
 
 #endif
diff --git a/libc/sysdeps/linux/arm/libc-aeabi_read_tp.S b/libc/sysdeps/linux/arm/libc-aeabi_read_tp.S
new file mode 100644
index 000000000..3aa135bf2
--- /dev/null
+++ b/libc/sysdeps/linux/arm/libc-aeabi_read_tp.S
@@ -0,0 +1 @@
+#include <ldso/ldso/arm/aeabi_read_tp.S>
diff --git a/libc/sysdeps/linux/arm/libc-thumb_atomics.S b/libc/sysdeps/linux/arm/libc-thumb_atomics.S
new file mode 100644
index 000000000..e7bc8950d
--- /dev/null
+++ b/libc/sysdeps/linux/arm/libc-thumb_atomics.S
@@ -0,0 +1 @@
+#include <ldso/ldso/arm/thumb_atomics.S>
diff --git a/libc/sysdeps/linux/arm/sysdep.h b/libc/sysdeps/linux/arm/sysdep.h
new file mode 100644
index 000000000..b1ae2fdb0
--- /dev/null
+++ b/libc/sysdeps/linux/arm/sysdep.h
@@ -0,0 +1,367 @@
+/* Assembler macros for ARM.
+   Copyright (C) 1997, 1998, 2003 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#ifndef _LINUX_ARM_SYSDEP_H
+#define _LINUX_ARM_SYSDEP_H 1
+
+#include <common/sysdep.h>
+
+#include <sys/syscall.h>
+/* For Linux we can use the system call table in the header file
+	/usr/include/asm/unistd.h
+   of the kernel.  But these symbols do not follow the SYS_* syntax
+   so we have to redefine the `SYS_ify' macro here.  */
+#undef SYS_ify
+#define SWI_BASE  (0x900000)
+#define SYS_ify(syscall_name)	(__NR_##syscall_name)
+
+#ifdef	__ASSEMBLER__
+
+/* Syntactic details of assembler.  */
+
+#define ALIGNARG(log2) log2
+/* For ELF we need the `.type' directive to make shared libs work right.  */
+#define ASM_TYPE_DIRECTIVE(name,typearg) .type name,%##typearg;
+#define ASM_SIZE_DIRECTIVE(name) .size name,.-name
+
+/* In ELF C symbols are asm symbols.  */
+#undef	NO_UNDERSCORES
+#define NO_UNDERSCORES
+
+#define PLTJMP(_x)	_x##(PLT)
+
+/* APCS-32 doesn't preserve the condition codes across function call. */
+#ifdef __APCS_32__
+#define LOADREGS(cond, base, reglist...)\
+	ldm##cond	base,reglist
+#ifdef __USE_BX__
+#define RETINSTR(cond, reg)	\
+	bx##cond	reg
+#define DO_RET(_reg)		\
+	bx _reg
+#else
+#define RETINSTR(cond, reg)	\
+	mov##cond	pc, reg
+#define DO_RET(_reg)		\
+	mov pc, _reg
+#endif
+#else  /* APCS-26 */
+#define LOADREGS(cond, base, reglist...)	\
+	ldm##cond	base,reglist^
+#define RETINSTR(cond, reg)	\
+	mov##cond##s	pc, reg
+#define DO_RET(_reg)		\
+	movs pc, _reg
+#endif
+
+/* Define an entry point visible from C.  */
+#define	ENTRY(name)						\
+  ASM_GLOBAL_DIRECTIVE C_SYMBOL_NAME(name);			\
+  ASM_TYPE_DIRECTIVE (C_SYMBOL_NAME(name),function)		\
+  .align ALIGNARG(4);						\
+  name##:							\
+  CALL_MCOUNT
+
+#undef	END
+#define END(name)						\
+  ASM_SIZE_DIRECTIVE(name)
+
+/* If compiled for profiling, call `mcount' at the start of each function.  */
+#ifdef	PROF
+#define CALL_MCOUNT			\
+	str	lr,[sp, #-4]!	;	\
+	bl	PLTJMP(mcount)	;	\
+	ldr	lr, [sp], #4	;
+#else
+#define CALL_MCOUNT		/* Do nothing.  */
+#endif
+
+#ifdef	NO_UNDERSCORES
+/* Since C identifiers are not normally prefixed with an underscore
+   on this system, the asm identifier `syscall_error' intrudes on the
+   C name space.  Make sure we use an innocuous name.  */
+#define	syscall_error	__syscall_error
+#define mcount		_mcount
+#endif
+
+/* Linux uses a negative return value to indicate syscall errors,
+   unlike most Unices, which use the condition codes' carry flag.
+
+   Since version 2.1 the return value of a system call might be
+   negative even if the call succeeded.  E.g., the `lseek' system call
+   might return a large offset.  Therefore we must not anymore test
+   for < 0, but test for a real error by making sure the value in R0
+   is a real error number.  Linus said he will make sure the no syscall
+   returns a value in -1 .. -4095 as a valid result so we can safely
+   test with -4095.  */
+
+#undef	PSEUDO
+#define	PSEUDO(name, syscall_name, args)				\
+  .text;								\
+  ENTRY (name);								\
+    DO_CALL (syscall_name, args);					\
+    cmn r0, $4096;
+
+#define PSEUDO_RET							\
+    RETINSTR(cc, lr);							\
+    b PLTJMP(SYSCALL_ERROR)
+#undef ret
+#define ret PSEUDO_RET
+
+#undef	PSEUDO_END
+#define	PSEUDO_END(name)						\
+  SYSCALL_ERROR_HANDLER							\
+  END (name)
+
+#undef	PSEUDO_NOERRNO
+#define	PSEUDO_NOERRNO(name, syscall_name, args)			\
+  .text;								\
+  ENTRY (name);								\
+    DO_CALL (syscall_name, args);
+
+#define PSEUDO_RET_NOERRNO						\
+    DO_RET (lr);
+
+#undef ret_NOERRNO
+#define ret_NOERRNO PSEUDO_RET_NOERRNO
+
+#undef	PSEUDO_END_NOERRNO
+#define	PSEUDO_END_NOERRNO(name)					\
+  END (name)
+
+/* The function has to return the error code.  */
+#undef	PSEUDO_ERRVAL
+#define	PSEUDO_ERRVAL(name, syscall_name, args) \
+  .text;								\
+  ENTRY (name)								\
+    DO_CALL (syscall_name, args);					\
+    rsb r0, r0, #0
+
+#undef	PSEUDO_END_ERRVAL
+#define	PSEUDO_END_ERRVAL(name) \
+  END (name)
+
+#define ret_ERRVAL PSEUDO_RET_NOERRNO
+
+#if NOT_IN_libc
+# define SYSCALL_ERROR __local_syscall_error
+# ifdef RTLD_PRIVATE_ERRNO
+#  define SYSCALL_ERROR_HANDLER					\
+__local_syscall_error:						\
+       ldr     r1, 1f;						\
+       rsb     r0, r0, #0;					\
+0:     str     r0, [pc, r1];					\
+       mvn     r0, #0;						\
+       DO_RET(lr);						\
+1:     .word C_SYMBOL_NAME(rtld_errno) - 0b - 8;
+# else
+#  define SYSCALL_ERROR_HANDLER					\
+__local_syscall_error:						\
+	str	lr, [sp, #-4]!;					\
+	str	r0, [sp, #-4]!;					\
+	bl	PLTJMP(C_SYMBOL_NAME(__errno_location)); 	\
+	ldr	r1, [sp], #4;					\
+	rsb	r1, r1, #0;					\
+	str	r1, [r0];					\
+	mvn	r0, #0;						\
+	ldr	pc, [sp], #4;
+# endif
+#else
+# define SYSCALL_ERROR_HANDLER	/* Nothing here; code in sysdep.S is used.  */
+# define SYSCALL_ERROR __syscall_error
+#endif
+
+/* Linux takes system call args in registers:
+	syscall number	in the SWI instruction
+	arg 1		r0
+	arg 2		r1
+	arg 3		r2
+	arg 4		r3
+	arg 5		r4	(this is different from the APCS convention)
+	arg 6		r5
+	arg 7		r6
+
+   The compiler is going to form a call by coming here, through PSEUDO, with
+   arguments
+	syscall number	in the DO_CALL macro
+	arg 1		r0
+	arg 2		r1
+	arg 3		r2
+	arg 4		r3
+	arg 5		[sp]
+	arg 6		[sp+4]
+	arg 7		[sp+8]
+
+   We need to shuffle values between R4..R6 and the stack so that the
+   caller's v1..v3 and stack frame are not corrupted, and the kernel
+   sees the right arguments.
+
+*/
+
+#undef	DO_CALL
+#if defined(__ARM_EABI__)
+#define DO_CALL(syscall_name, args)		\
+    DOARGS_##args				\
+    mov ip, r7;					\
+    ldr r7, =SYS_ify (syscall_name);		\
+    swi 0x0;					\
+    mov r7, ip;					\
+    UNDOARGS_##args
+#else
+#define DO_CALL(syscall_name, args)		\
+    DOARGS_##args				\
+    swi SYS_ify (syscall_name); 		\
+    UNDOARGS_##args
+#endif
+
+#define DOARGS_0 /* nothing */
+#define DOARGS_1 /* nothing */
+#define DOARGS_2 /* nothing */
+#define DOARGS_3 /* nothing */
+#define DOARGS_4 /* nothing */
+#define DOARGS_5 str r4, [sp, $-4]!; ldr r4, [sp, $4];
+#define DOARGS_6 mov ip, sp; stmfd sp!, {r4, r5}; ldmia ip, {r4, r5};
+#define DOARGS_7 mov ip, sp; stmfd sp!, {r4, r5, r6}; ldmia ip, {r4, r5, r6};
+
+#define UNDOARGS_0 /* nothing */
+#define UNDOARGS_1 /* nothing */
+#define UNDOARGS_2 /* nothing */
+#define UNDOARGS_3 /* nothing */
+#define UNDOARGS_4 /* nothing */
+#define UNDOARGS_5 ldr r4, [sp], $4;
+#define UNDOARGS_6 ldmfd sp!, {r4, r5};
+#define UNDOARGS_7 ldmfd sp!, {r4, r5, r6};
+
+#else /* not __ASSEMBLER__ */
+/* Define a macro which expands into the inline wrapper code for a system
+   call.  */
+#undef INLINE_SYSCALL
+#define INLINE_SYSCALL(name, nr, args...)				\
+  ({ unsigned int _sys_result = INTERNAL_SYSCALL (name, , nr, args);	\
+     if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (_sys_result, ), 0))	\
+       {								\
+	 __set_errno (INTERNAL_SYSCALL_ERRNO (_sys_result, ));		\
+	 _sys_result = (unsigned int) -1;				\
+       }								\
+     (int) _sys_result; })
+
+#undef INTERNAL_SYSCALL_DECL
+#define INTERNAL_SYSCALL_DECL(err) do { } while (0)
+
+#undef INTERNAL_SYSCALL_RAW
+#if defined(__thumb__)
+/* Hide the use of r7 from the compiler, this would be a lot
+ * easier but for the fact that the syscalls can exceed 255.
+ * For the moment the LOAD_ARG_7 is sacrificed.
+ * We can't use push/pop inside the asm because that breaks
+ * unwinding (ie. thread cancellation).
+ */
+#define INTERNAL_SYSCALL_RAW(name, err, nr, args...)		\
+  ({ unsigned int _sys_result;					\
+    {								\
+      int _sys_buf[2];						\
+      register int _a1 __asm__ ("a1");				\
+      register int *_v3 __asm__ ("v3") = _sys_buf;		\
+      LOAD_ARGS_##nr (args)					\
+      *_v3 = (int) (name);					\
+      __asm__ __volatile__ ("str	r7, [v3, #4]\n"		\
+                    "\tldr      r7, [v3]\n"			\
+                    "\tswi      0       @ syscall " #name "\n"	\
+                    "\tldr      r7, [v3, #4]"			\
+                   : "=r" (_a1)					\
+                    : "r" (_v3) ASM_ARGS_##nr			\
+                    : "memory");				\
+      _sys_result = _a1;					\
+    }								\
+    (int) _sys_result; })
+#elif defined(__ARM_EABI__)
+#define INTERNAL_SYSCALL_RAW(name, err, nr, args...)		\
+  ({unsigned int _sys_result;					\
+     {								\
+       register int _a1 __asm__ ("r0"), _nr __asm__ ("r7");	\
+       LOAD_ARGS_##nr (args)					\
+       _nr = name;						\
+       __asm__ __volatile__ ("swi	0x0 @ syscall " #name	\
+		     : "=r" (_a1)				\
+		     : "r" (_nr) ASM_ARGS_##nr			\
+		     : "memory");				\
+       _sys_result = _a1;					\
+     }								\
+     (int) _sys_result; })
+#else /* !defined(__ARM_EABI__) */
+#define INTERNAL_SYSCALL_RAW(name, err, nr, args...)		\
+  ({ unsigned int _sys_result;					\
+     {								\
+       register int _a1 __asm__ ("a1");				\
+       LOAD_ARGS_##nr (args)					\
+       __asm__ __volatile__ ("swi	%1 @ syscall " #name	\
+		     : "=r" (_a1)				\
+		     : "i" (name) ASM_ARGS_##nr			\
+		     : "memory");				\
+       _sys_result = _a1;					\
+     }								\
+     (int) _sys_result; })
+#endif
+
+#undef INTERNAL_SYSCALL
+#define INTERNAL_SYSCALL(name, err, nr, args...)		\
+	INTERNAL_SYSCALL_RAW(SYS_ify(name), err, nr, args)
+
+#undef INTERNAL_SYSCALL_ARM
+#define INTERNAL_SYSCALL_ARM(name, err, nr, args...)		\
+	INTERNAL_SYSCALL_RAW(__ARM_NR_##name, err, nr, args)
+
+#undef INTERNAL_SYSCALL_ERROR_P
+#define INTERNAL_SYSCALL_ERROR_P(val, err) \
+  ((unsigned int) (val) >= 0xfffff001u)
+
+#undef INTERNAL_SYSCALL_ERRNO
+#define INTERNAL_SYSCALL_ERRNO(val, err)	(-(val))
+
+#if defined(__ARM_EABI__)
+#undef INTERNAL_SYSCALL_NCS
+#define INTERNAL_SYSCALL_NCS(number, err, nr, args...)		\
+	INTERNAL_SYSCALL_RAW(number, err, nr, args)
+#else
+/* We can't implement non-constant syscalls directly since the syscall
+   number is normally encoded in the instruction.  So use SYS_syscall.  */
+#undef INTERNAL_SYSCALL_NCS
+#define INTERNAL_SYSCALL_NCS(number, err, nr, args...)		\
+	INTERNAL_SYSCALL_NCS_##nr (number, err, args)
+
+#define INTERNAL_SYSCALL_NCS_0(number, err, args...)		\
+	INTERNAL_SYSCALL (syscall, err, 1, number, args)
+#define INTERNAL_SYSCALL_NCS_1(number, err, args...)		\
+	INTERNAL_SYSCALL (syscall, err, 2, number, args)
+#define INTERNAL_SYSCALL_NCS_2(number, err, args...)		\
+	INTERNAL_SYSCALL (syscall, err, 3, number, args)
+#define INTERNAL_SYSCALL_NCS_3(number, err, args...)		\
+	INTERNAL_SYSCALL (syscall, err, 4, number, args)
+#define INTERNAL_SYSCALL_NCS_4(number, err, args...)		\
+	INTERNAL_SYSCALL (syscall, err, 5, number, args)
+#define INTERNAL_SYSCALL_NCS_5(number, err, args...)		\
+	INTERNAL_SYSCALL (syscall, err, 6, number, args)
+#endif
+
+#endif	/* __ASSEMBLER__ */
+
+/* Pointer mangling is not yet supported for ARM.  */
+#define PTR_MANGLE(var) (void) (var)
+#define PTR_DEMANGLE(var) (void) (var)
+
+#endif /* linux/arm/sysdep.h */
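The INLINE_SYSCALL/INTERNAL_SYSCALL macros defined above are the foundation for the C-level wrappers added throughout this commit. A minimal sketch of how a wrapper uses them (my_fsync is a hypothetical name, not part of this commit; on failure INLINE_SYSCALL stores the error code in errno and evaluates to -1):

#include <sys/syscall.h>

int my_fsync (int fd)
{
	/* On ARM EABI this expands to the sequence above:
	   r7 = __NR_fsync, r0 = fd, then swi 0x0.  */
	return INLINE_SYSCALL (fsync, 1, fd);
}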
diff --git a/libc/sysdeps/linux/arm/vfork.S b/libc/sysdeps/linux/arm/vfork.S
index 42595b026..17d6a4db0 100644
--- a/libc/sysdeps/linux/arm/vfork.S
+++ b/libc/sysdeps/linux/arm/vfork.S
@@ -12,6 +12,15 @@
 #include <bits/errno.h>
 #include <sys/syscall.h>
 
+#ifndef SAVE_PID
+#define SAVE_PID
+#endif
+
+#ifndef RESTORE_PID
+#define RESTORE_PID
+#endif
+
+
 #ifdef __NR_fork
 .text
 .global	__vfork
@@ -23,7 +32,9 @@
 .thumb_func
 __vfork:
 #ifdef __NR_vfork
+	SAVE_PID
 	DO_CALL (vfork)
+	RESTORE_PID
 	ldr		r1, =0xfffff000
 	cmp		r0, r1
 	bcs		1f
@@ -57,7 +68,9 @@ __error:
 
 __vfork:
 #ifdef __NR_vfork
+	SAVE_PID
 	DO_CALL (vfork)
+	RESTORE_PID
 	cmn	r0, #4096
 	IT(t, cc)
 #if defined(__USE_BX__)
diff --git a/libc/sysdeps/linux/common/Makefile.in b/libc/sysdeps/linux/common/Makefile.in
index 172feb162..a6fa6d091 100644
--- a/libc/sysdeps/linux/common/Makefile.in
+++ b/libc/sysdeps/linux/common/Makefile.in
@@ -34,6 +34,17 @@ CSRC := $(filter-out capget.c capset.c inotify.c ioperm.c iopl.c madvise.c \
 	sync_file_range.c sysctl.c sysinfo.c timerfd.c uselib.c vhangup.c,$(CSRC))
 endif
 
+ifeq ($(UCLIBC_HAS_THREADS_NATIVE),y)
+CSRC := $(filter-out fork.c getpid.c raise.c open.c close.c read.c write.c, $(CSRC))
+ifeq ($(TARGET_ARCH),arm)
+CSRC := $(filter-out vfork.c, $(CSRC))
+else ifeq ($(TARGET_ARCH),x86_64)
+CSRC := $(filter-out vfork.c, $(CSRC))
+else
+CSRC := $(filter-out waitpid.c, $(CSRC))
+endif
+endif
+
 ifneq ($(UCLIBC_BSD_SPECIFIC),y)
 # we need these internally: getdomainname.c
 CSRC := $(filter-out mincore.c setdomainname.c,$(CSRC))
@@ -75,6 +86,18 @@ ifneq ($(UCLIBC_SV4_DEPRECATED),y)
 CSRC := $(filter-out ustat.c,$(CSRC))
 endif
 
+ifeq ($(TARGET_ARCH),sh)
+CSRC := $(filter-out longjmp.c vfork.c,$(CSRC))
+endif
+
+ifeq ($(TARGET_ARCH),sparc)
+CSRC := $(filter-out vfork.c,$(CSRC))
+endif
+
+ifeq ($(TARGET_ARCH),i386)
+CSRC := $(filter-out vfork.c,$(CSRC))
+endif
+
 # fails for some reason
 ifneq ($(strip $(ARCH_OBJS)),)
 CSRC := $(filter-out $(notdir $(ARCH_OBJS:.o=.c)) $(ARCH_OBJ_FILTEROUT),$(CSRC))
diff --git a/libc/sysdeps/linux/common/__rt_sigtimedwait.c b/libc/sysdeps/linux/common/__rt_sigtimedwait.c
index f9ec0eabf..554c6b9cb 100644
--- a/libc/sysdeps/linux/common/__rt_sigtimedwait.c
+++ b/libc/sysdeps/linux/common/__rt_sigtimedwait.c
@@ -2,44 +2,97 @@
 /*
  * __rt_sigtimedwait() for uClibc
  *
- * Copyright (C) 2000-2006 Erik Andersen <andersen@uclibc.org>
+ * Copyright (C) 2006 by Steven Hill <sjhill@realitydiluted.com>
+ * Copyright (C) 2000-2004 by Erik Andersen <andersen@codepoet.org>
  *
- * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
+ * GNU Library General Public License (LGPL) version 2 or later.
  */
 
 #include <sys/syscall.h>
 #include <signal.h>
-#define __need_NULL
-#include <stddef.h>
+#include <string.h>
+libc_hidden_proto(memcpy)
 
 #ifdef __NR_rt_sigtimedwait
-#define __NR___rt_sigtimedwait __NR_rt_sigtimedwait
-static _syscall4(int, __rt_sigtimedwait, const sigset_t *, set, siginfo_t *, info,
-		  const struct timespec *, timeout, size_t, setsize)
+#include <string.h>
+libc_hidden_proto(memcpy)
 
-int sigwaitinfo(const sigset_t * set, siginfo_t * info)
+# ifdef __UCLIBC_HAS_THREADS_NATIVE__
+#  include <sysdep-cancel.h>
+
+static int do_sigtimedwait(const sigset_t *set, siginfo_t *info,
+						   const struct timespec *timeout)
 {
-	return __rt_sigtimedwait(set, info, NULL, _NSIG / 8);
+#  ifdef SIGCANCEL
+	sigset_t tmpset;
+
+	if (set != NULL && (__builtin_expect (__sigismember (set, SIGCANCEL), 0)
+#   ifdef SIGSETXID
+		|| __builtin_expect (__sigismember (set, SIGSETXID), 0)
+#   endif
+		))
+	{
+		/* Create a temporary mask without the bit for SIGCANCEL set.  */
+		// We are not copying more than we have to.
+		memcpy (&tmpset, set, _NSIG / 8);
+		__sigdelset (&tmpset, SIGCANCEL);
+#   ifdef SIGSETXID
+		__sigdelset (&tmpset, SIGSETXID);
+#   endif
+		set = &tmpset;
+	}
+#  endif
+
+	/* XXX The size argument hopefully will have to be changed to the
+	   real size of the user-level sigset_t.  */
+	int result = INLINE_SYSCALL (rt_sigtimedwait, 4, set, info,
+								 timeout, _NSIG / 8);
+
+	/* The kernel generates a SI_TKILL code in si_code in case tkill is
+	   used.  tkill is transparently used in raise().  Since having
+	   SI_TKILL as a code is useful in general we fold the results
+	   here.  */
+	if (result != -1 && info != NULL && info->si_code == SI_TKILL)
+		info->si_code = SI_USER;
+
+	return result;
 }
 
-int sigtimedwait(const sigset_t * set, siginfo_t * info,
-				 const struct timespec *timeout)
+/* Return any pending signal or wait for one for the given time.  */
+int __sigtimedwait(const sigset_t *set, siginfo_t *info,
+				   const struct timespec *timeout)
 {
-	return __rt_sigtimedwait(set, info, timeout, _NSIG / 8);
+	if(SINGLE_THREAD_P)
+		return do_sigtimedwait(set, info, timeout);
+
+	int oldtype = LIBC_CANCEL_ASYNC();
+
+	/* XXX The size argument hopefully will have to be changed to the
+	   real size of the user-level sigset_t.  */
+	int result = do_sigtimedwait(set, info, timeout);
+
+	LIBC_CANCEL_RESET(oldtype);
+
+	return result;
 }
-#else
-int sigwaitinfo(const sigset_t * set, siginfo_t * info)
+# else
+#  define __need_NULL
+#  include <stddef.h>
+#  define __NR___rt_sigtimedwait __NR_rt_sigtimedwait
+static _syscall4(int, __rt_sigtimedwait, const sigset_t *, set,
+				 siginfo_t *, info, const struct timespec *, timeout,
+				 size_t, setsize);
+
+int attribute_hidden __sigtimedwait(const sigset_t * set, siginfo_t * info,
+									const struct timespec *timeout)
 {
-	if (set == NULL)
-		__set_errno(EINVAL);
-	else
-		__set_errno(ENOSYS);
-	return -1;
+	return __rt_sigtimedwait(set, info, timeout, _NSIG / 8);
 }
-
-int sigtimedwait(const sigset_t * set, siginfo_t * info,
-				 const struct timespec *timeout)
+# endif /* !__UCLIBC_HAS_THREADS_NATIVE__ */
+#else
+int attribute_hidden __sigtimedwait(const sigset_t * set, siginfo_t * info,
+									const struct timespec *timeout)
 {
 	if (set == NULL)
 		__set_errno(EINVAL);
@@ -48,5 +101,4 @@ int sigtimedwait(const sigset_t * set, siginfo_t * info,
 	return -1;
 }
 #endif
-libc_hidden_def(sigwaitinfo)
-libc_hidden_def(sigtimedwait)
+weak_alias(__sigtimedwait,sigtimedwait)
diff --git a/libc/sysdeps/linux/common/__rt_sigwaitinfo.c b/libc/sysdeps/linux/common/__rt_sigwaitinfo.c
new file mode 100644
index 000000000..c8953bfbc
--- /dev/null
+++ b/libc/sysdeps/linux/common/__rt_sigwaitinfo.c
@@ -0,0 +1,102 @@
+/* vi: set sw=4 ts=4: */
+/*
+ * __rt_sigwaitinfo() for uClibc
+ *
+ * Copyright (C) 2006 by Steven Hill <sjhill@realitydiluted.com>
+ * Copyright (C) 2000-2004 by Erik Andersen <andersen@codepoet.org>
+ *
+ * GNU Library General Public License (LGPL) version 2 or later.
+ */
+
+#include <sys/syscall.h>
+#include <signal.h>
+#include <string.h>
+
+libc_hidden_proto(memcpy)
+
+#ifdef __NR_rt_sigtimedwait
+
+#include <string.h>
+
+# ifdef __UCLIBC_HAS_THREADS_NATIVE__
+#  include <sysdep-cancel.h>
+
+static int do_sigwaitinfo(const sigset_t *set, siginfo_t *info)
+{
+#  ifdef SIGCANCEL
+	sigset_t tmpset;
+
+	if (set != NULL && (__builtin_expect (__sigismember (set, SIGCANCEL), 0)
+#   ifdef SIGSETXID
+		|| __builtin_expect (__sigismember (set, SIGSETXID), 0)
+#   endif
+		))
+	{
+		/* Create a temporary mask without the bit for SIGCANCEL set.  */
+		// We are not copying more than we have to.
+		memcpy (&tmpset, set, _NSIG / 8);
+		__sigdelset (&tmpset, SIGCANCEL);
+#   ifdef SIGSETXID
+		__sigdelset (&tmpset, SIGSETXID);
+#   endif
+		set = &tmpset;
+	}
+#  endif
+
+	/* XXX The size argument hopefully will have to be changed to the
+	   real size of the user-level sigset_t.  */
+	int result = INLINE_SYSCALL (rt_sigtimedwait, 4, set, info,
+								 NULL, _NSIG / 8);
+
+	/* The kernel generates a SI_TKILL code in si_code in case tkill is
+	   used.  tkill is transparently used in raise().  Since having
+	   SI_TKILL as a code is useful in general we fold the results
+	   here.  */
+	if (result != -1 && info != NULL && info->si_code == SI_TKILL)
+		info->si_code = SI_USER;
+
+	return result;
+}
+
+/* Return any pending signal or wait for one for the given time.  */
+int __sigwaitinfo(const sigset_t *set, siginfo_t *info)
+{
+	if(SINGLE_THREAD_P)
+		return do_sigwaitinfo(set, info);
+
+	int oldtype = LIBC_CANCEL_ASYNC();
+
+	/* XXX The size argument hopefully will have to be changed to the
+	   real size of the user-level sigset_t.  */
+	int result = do_sigwaitinfo(set, info);
+
+	LIBC_CANCEL_RESET(oldtype);
+
+	return result;
+}
+# else
+#  define __need_NULL
+#  include <stddef.h>
+#  define __NR___rt_sigwaitinfo __NR_rt_sigtimedwait
+static _syscall4(int, __rt_sigwaitinfo, const sigset_t *, set,
+				 siginfo_t *, info, const struct timespec *, timeout,
+				 size_t, setsize);
+
+int attribute_hidden __sigwaitinfo(const sigset_t * set, siginfo_t * info)
+{
+	return __rt_sigwaitinfo(set, info, NULL, _NSIG / 8);
+}
+# endif
+#else
+int attribute_hidden __sigwaitinfo(const sigset_t * set, siginfo_t * info)
+{
+	if (set == NULL)
+		__set_errno(EINVAL);
+	else
+		__set_errno(ENOSYS);
+	return -1;
+}
+#endif
+libc_hidden_proto(sigwaitinfo)
+weak_alias (__sigwaitinfo, sigwaitinfo)
+libc_hidden_weak(sigwaitinfo)
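Both rt_sig* wrappers above follow the NPTL cancellation-point pattern that recurs through the rest of this commit. A minimal sketch of that pattern, assuming the <sysdep-cancel.h> macros used in the diffs (op and do_op are hypothetical names):

int op (int arg)
{
	if (SINGLE_THREAD_P)			/* fast path: no cancellation bookkeeping */
		return do_op (arg);

	int oldtype = LIBC_CANCEL_ASYNC ();	/* allow async cancellation around the syscall */
	int result = do_op (arg);		/* the blocking syscall happens here */
	LIBC_CANCEL_RESET (oldtype);		/* restore the previous cancellation type */
	return result;
}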
diff --git a/libc/sysdeps/linux/common/__syscall_fcntl.c b/libc/sysdeps/linux/common/__syscall_fcntl.c
index 355b22b00..4e3bc23df 100644
--- a/libc/sysdeps/linux/common/__syscall_fcntl.c
+++ b/libc/sysdeps/linux/common/__syscall_fcntl.c
@@ -2,6 +2,7 @@
 /*
  * __syscall_fcntl() for uClibc
  *
+ * Copyright (C) 2006 Steven J. Hill <sjhill@realitydiluted.com>
  * Copyright (C) 2000-2006 Erik Andersen <andersen@uclibc.org>
  *
  * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
@@ -9,42 +10,83 @@
 
 #include <sys/syscall.h>
 #include <stdarg.h>
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+#include <sysdep-cancel.h>	/* Must come before <fcntl.h>.  */
+#endif
 #include <fcntl.h>
 #include <bits/wordsize.h>
 
-#define __NR___syscall_fcntl __NR_fcntl
-static __always_inline
-_syscall3(int, __syscall_fcntl, int, fd, int, cmd, long, arg)
+extern __typeof(fcntl) __libc_fcntl;
+libc_hidden_proto(__libc_fcntl)
 
-int fcntl(int fd, int cmd, ...)
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+int __fcntl_nocancel (int fd, int cmd, ...)
 {
-	long arg;
-	va_list list;
+	va_list ap;
+	void *arg;
 
-	va_start(list, cmd);
-	arg = va_arg(list, long);
-	va_end(list);
+	va_start (ap, cmd);
+	arg = va_arg (ap, void *);
+	va_end (ap);
 
-#if __WORDSIZE == 32
+# if __WORDSIZE == 32
 	if (cmd == F_GETLK64 || cmd == F_SETLK64 || cmd == F_SETLKW64) {
-#if defined __UCLIBC_HAS_LFS__ && defined __NR_fcntl64
-		return fcntl64(fd, cmd, arg);
-#else
+#  if defined __UCLIBC_HAS_LFS__ && defined __NR_fcntl64
+		return INLINE_SYSCALL (fcntl64, 3, fd, cmd, arg);
+#  else
 		__set_errno(ENOSYS);
 		return -1;
-#endif
+#  endif
 	}
+# endif
+	return INLINE_SYSCALL (fcntl, 3, fd, cmd, arg);
+}
 #endif
-	return (__syscall_fcntl(fd, cmd, arg));
-}
 
-#ifndef __LINUXTHREADS_OLD__
-libc_hidden_def(fcntl)
+int __libc_fcntl (int fd, int cmd, ...)
+{
+	va_list ap;
+	void *arg;
+
+	va_start (ap, cmd);
+	arg = va_arg (ap, void *);
+	va_end (ap);
+
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+	if (SINGLE_THREAD_P || (cmd != F_SETLKW && cmd != F_SETLKW64))
+# if defined __UCLIBC_HAS_LFS__ && defined __NR_fcntl64
+		return INLINE_SYSCALL (fcntl64, 3, fd, cmd, arg);
+# else
+		return INLINE_SYSCALL (fcntl, 3, fd, cmd, arg);
+# endif
+
+	int oldtype = LIBC_CANCEL_ASYNC ();
+
+# if defined __UCLIBC_HAS_LFS__ && defined __NR_fcntl64
+	int result = INLINE_SYSCALL (fcntl64, 3, fd, cmd, arg);
+# else
+	int result = INLINE_SYSCALL (fcntl, 3, fd, cmd, arg);
+# endif
+
+	LIBC_CANCEL_RESET (oldtype);
+
+	return result;
 #else
-libc_hidden_weak(fcntl)
-strong_alias(fcntl,__libc_fcntl)
+# if __WORDSIZE == 32
+	if (cmd == F_GETLK64 || cmd == F_SETLK64 || cmd == F_SETLKW64) {
+#  if defined __UCLIBC_HAS_LFS__ && defined __NR_fcntl64
+		return INLINE_SYSCALL (fcntl64, 3, fd, cmd, arg);
+#  else
+		__set_errno(ENOSYS);
+		return -1;
+#  endif
+	}
+# endif
+	return INLINE_SYSCALL (fcntl, 3, fd, cmd, arg);
 #endif
+}
+libc_hidden_def(__libc_fcntl)
 
-#if ! defined __NR_fcntl64 && defined __UCLIBC_HAS_LFS__
-strong_alias(fcntl,fcntl64)
-#endif
+libc_hidden_proto(fcntl)
+weak_alias(__libc_fcntl,fcntl)
+libc_hidden_weak(fcntl)
diff --git a/libc/sysdeps/linux/common/__syscall_rt_sigaction.c b/libc/sysdeps/linux/common/__syscall_rt_sigaction.c
index b4b007d02..006b38a2a 100644
--- a/libc/sysdeps/linux/common/__syscall_rt_sigaction.c
+++ b/libc/sysdeps/linux/common/__syscall_rt_sigaction.c
@@ -12,7 +12,9 @@
 #ifdef __NR_rt_sigaction
 #include <signal.h>
 
-int __syscall_rt_sigaction (int __signum, const struct sigaction *__act, struct sigaction *__oldact, size_t __size) attribute_hidden;
+int __syscall_rt_sigaction (int __signum, const struct sigaction *__act,
+							struct sigaction *__oldact, size_t __size);
+
 #define __NR___syscall_rt_sigaction __NR_rt_sigaction
 _syscall4(int, __syscall_rt_sigaction, int, signum,
 		  const struct sigaction *, act, struct sigaction *, oldact,
diff --git a/libc/sysdeps/linux/common/_exit.c b/libc/sysdeps/linux/common/_exit.c
index 6cece0878..51117d109 100644
--- a/libc/sysdeps/linux/common/_exit.c
+++ b/libc/sysdeps/linux/common/_exit.c
@@ -12,13 +12,23 @@
 #include <unistd.h>
 #include <sys/types.h>
 #include <sys/syscall.h>
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+#include <sysdep.h>
+#endif
 
 void attribute_noreturn _exit(int status)
 {
 	/* The loop is added only to keep gcc happy. */
 	while(1)
+	{
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+# ifdef __NR_exit_group
+		INLINE_SYSCALL(exit_group, 1, status);
+# endif
+#endif
 		INLINE_SYSCALL(exit, 1, status);
+	}
 }
 libc_hidden_def(_exit)
 weak_alias(_exit,_Exit)
diff --git a/libc/sysdeps/linux/common/bits/kernel_sigaction.h b/libc/sysdeps/linux/common/bits/kernel_sigaction.h
index f74e0a28a..0a35ac8cb 100644
--- a/libc/sysdeps/linux/common/bits/kernel_sigaction.h
+++ b/libc/sysdeps/linux/common/bits/kernel_sigaction.h
@@ -25,12 +25,12 @@ struct old_kernel_sigaction {
  */
 
 extern int __syscall_sigaction(int, const struct old_kernel_sigaction *,
-	struct old_kernel_sigaction *) attribute_hidden;
+	struct old_kernel_sigaction *);
 
 #endif
 
 extern int __syscall_rt_sigaction(int, const struct sigaction *,
-	struct sigaction *, size_t) attribute_hidden;
+	struct sigaction *, size_t);
 
 #endif /* _BITS_SIGACTION_STRUCT_H */
diff --git a/libc/sysdeps/linux/common/bits/uClibc_mutex.h b/libc/sysdeps/linux/common/bits/uClibc_mutex.h
index 14aeb9c80..c6094c3d2 100644
--- a/libc/sysdeps/linux/common/bits/uClibc_mutex.h
+++ b/libc/sysdeps/linux/common/bits/uClibc_mutex.h
@@ -62,7 +62,55 @@
 #define __UCLIBC_MUTEX_UNLOCK(M)									\
         __UCLIBC_MUTEX_CONDITIONAL_UNLOCK(M, 1)
 
-#else
+#ifdef __USE_STDIO_FUTEXES__
+
+#include <bits/stdio-lock.h>
+
+#define __UCLIBC_IO_MUTEX(M)			_IO_lock_t M
+#define __UCLIBC_IO_MUTEX_LOCK(M) 		_IO_lock_lock(M)
+#define __UCLIBC_IO_MUTEX_UNLOCK(M) 	_IO_lock_unlock(M)
+#define __UCLIBC_IO_MUTEX_TRYLOCK(M) 	_IO_lock_trylock(M)
+#define __UCLIBC_IO_MUTEX_INIT(M) 	_IO_lock_t M = _IO_lock_initializer
+#define __UCLIBC_IO_MUTEX_EXTERN(M)		extern _IO_lock_t M
+
+#define __UCLIBC_IO_MUTEX_CONDITIONAL_LOCK(M,C)		\
+	if (C) {										\
+		_IO_lock_lock(M);							\
+	}
+
+#define __UCLIBC_IO_MUTEX_CONDITIONAL_UNLOCK(M,C)	\
+	if (C) {										\
+		_IO_lock_unlock(M);							\
+	}
+
+#define __UCLIBC_IO_MUTEX_AUTO_LOCK(M,A,V)			\
+		__UCLIBC_IO_MUTEX_CONDITIONAL_LOCK(M,((A=(V))) == 0)
+
+#define __UCLIBC_IO_MUTEX_AUTO_UNLOCK(M,A)			\
+		__UCLIBC_IO_MUTEX_CONDITIONAL_UNLOCK(M,((A) == 0))
+
+#define __UCLIBC_IO_MUTEX_LOCK_CANCEL_UNSAFE(M)		_IO_lock_lock(M)
+#define __UCLIBC_IO_MUTEX_UNLOCK_CANCEL_UNSAFE(M) 	_IO_lock_unlock(M)
+
+#else /* of __USE_STDIO_FUTEXES__ */
+
+#define __UCLIBC_IO_MUTEX(M)                        __UCLIBC_MUTEX(M)
+#define __UCLIBC_IO_MUTEX_LOCK(M)                   __UCLIBC_MUTEX_CONDITIONAL_LOCK(M, 1)
+#define __UCLIBC_IO_MUTEX_UNLOCK(M)                 __UCLIBC_MUTEX_CONDITIONAL_UNLOCK(M, 1)
+#define __UCLIBC_IO_MUTEX_TRYLOCK(M)                __UCLIBC_MUTEX_TRYLOCK_CANCEL_UNSAFE(M)
+#define __UCLIBC_IO_MUTEX_INIT(M)                   __UCLIBC_MUTEX_INIT(M, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP)
+#define __UCLIBC_IO_MUTEX_EXTERN(M)                 __UCLIBC_MUTEX_EXTERN(M)
+#define __UCLIBC_IO_MUTEX_AUTO_LOCK(M,A,V)          __UCLIBC_MUTEX_AUTO_LOCK(M,A,V)
+#define __UCLIBC_IO_MUTEX_AUTO_UNLOCK(M,A)          __UCLIBC_MUTEX_AUTO_UNLOCK(M,A)
+#define __UCLIBC_IO_MUTEX_LOCK_CANCEL_UNSAFE(M)     __UCLIBC_MUTEX_LOCK_CANCEL_UNSAFE(M)
+#define __UCLIBC_IO_MUTEX_UNLOCK_CANCEL_UNSAFE(M)   __UCLIBC_MUTEX_UNLOCK_CANCEL_UNSAFE(M)
+#define __UCLIBC_IO_MUTEX_CONDITIONAL_LOCK(M,C)     __UCLIBC_MUTEX_CONDITIONAL_LOCK(M, 1)
+#define __UCLIBC_IO_MUTEX_CONDITIONAL_UNLOCK(M,C)   __UCLIBC_MUTEX_CONDITIONAL_UNLOCK(M, 1)
+
+#endif /* of __USE_STDIO_FUTEXES__ */
+
+
+#else /* of __UCLIBC_HAS_THREADS__ */
 
 #define __UCLIBC_MUTEX(M)				void *__UCLIBC_MUTEX_DUMMY_ ## M
 #define __UCLIBC_MUTEX_INIT(M,I)			extern void *__UCLIBC_MUTEX_DUMMY_ ## M
@@ -83,6 +131,22 @@
 #define __UCLIBC_MUTEX_LOCK(M)				((void)0)
 #define __UCLIBC_MUTEX_UNLOCK(M)			((void)0)
 
-#endif
+#define __UCLIBC_IO_MUTEX(M)                        __UCLIBC_MUTEX(M)
+#define __UCLIBC_IO_MUTEX_LOCK(M)                   __UCLIBC_MUTEX_CONDITIONAL_LOCK(M, 1)
+#define __UCLIBC_IO_MUTEX_UNLOCK(M)                 __UCLIBC_MUTEX_CONDITIONAL_UNLOCK(M, 1)
+#define __UCLIBC_IO_MUTEX_TRYLOCK(M)                __UCLIBC_MUTEX_TRYLOCK_CANCEL_UNSAFE(M)
+#define __UCLIBC_IO_MUTEX_INIT(M)                   __UCLIBC_MUTEX_INIT(M, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP)
+#define __UCLIBC_IO_MUTEX_EXTERN(M)                 __UCLIBC_MUTEX_EXTERN(M)
+#define __UCLIBC_IO_MUTEX_AUTO_LOCK(M,A,V)          __UCLIBC_MUTEX_AUTO_LOCK(M,A,V)
+#define __UCLIBC_IO_MUTEX_AUTO_UNLOCK(M,A)          __UCLIBC_MUTEX_AUTO_UNLOCK(M,A)
+#define __UCLIBC_IO_MUTEX_LOCK_CANCEL_UNSAFE(M)     __UCLIBC_MUTEX_LOCK_CANCEL_UNSAFE(M)
+#define __UCLIBC_IO_MUTEX_UNLOCK_CANCEL_UNSAFE(M)   __UCLIBC_MUTEX_UNLOCK_CANCEL_UNSAFE(M)
+#define __UCLIBC_IO_MUTEX_CONDITIONAL_LOCK(M,C)     __UCLIBC_MUTEX_CONDITIONAL_LOCK(M, 1)
+#define __UCLIBC_IO_MUTEX_CONDITIONAL_UNLOCK(M,C)   __UCLIBC_MUTEX_CONDITIONAL_UNLOCK(M, 1)
+
+#endif /* of __UCLIBC_HAS_THREADS__ */
+
+#define __UCLIBC_IO_MUTEX_TRYLOCK_CANCEL_UNSAFE(M)	\
+		__UCLIBC_IO_MUTEX_TRYLOCK(M)
 
 #endif /* _UCLIBC_MUTEX_H */
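The new __UCLIBC_IO_MUTEX_* macros let stdio pick futex-based _IO_lock_t locks when __USE_STDIO_FUTEXES__ is set, falling back to the ordinary uClibc mutex macros otherwise. A hedged sketch of the intended use inside stdio code (the lock name and function are hypothetical):

__UCLIBC_IO_MUTEX(my_list_lock);	/* _IO_lock_t with futexes, a pthread mutex otherwise */

static void with_list_locked (void)
{
	__UCLIBC_IO_MUTEX_LOCK (my_list_lock);
	/* ... operate on shared stdio state ... */
	__UCLIBC_IO_MUTEX_UNLOCK (my_list_lock);
}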
diff --git a/libc/sysdeps/linux/common/bits/uClibc_stdio.h b/libc/sysdeps/linux/common/bits/uClibc_stdio.h
index 3631ef79f..a8cf4eb56 100644
--- a/libc/sysdeps/linux/common/bits/uClibc_stdio.h
+++ b/libc/sysdeps/linux/common/bits/uClibc_stdio.h
@@ -134,26 +134,26 @@
         __UCLIBC_MUTEX_AUTO_LOCK_VAR(__infunc_user_locking)
 
 #define __STDIO_AUTO_THREADLOCK(__stream)					\
-        __UCLIBC_MUTEX_AUTO_LOCK((__stream)->__lock, __infunc_user_locking,	\
+        __UCLIBC_IO_MUTEX_AUTO_LOCK((__stream)->__lock, __infunc_user_locking,	\
 	(__stream)->__user_locking)
 
 #define __STDIO_AUTO_THREADUNLOCK(__stream)					\
-        __UCLIBC_MUTEX_AUTO_UNLOCK((__stream)->__lock, __infunc_user_locking)
+        __UCLIBC_IO_MUTEX_AUTO_UNLOCK((__stream)->__lock, __infunc_user_locking)
 
 #define __STDIO_ALWAYS_THREADLOCK(__stream)					\
-        __UCLIBC_MUTEX_LOCK((__stream)->__lock)
+        __UCLIBC_IO_MUTEX_LOCK((__stream)->__lock)
 
 #define __STDIO_ALWAYS_THREADUNLOCK(__stream)					\
-        __UCLIBC_MUTEX_UNLOCK((__stream)->__lock)
+        __UCLIBC_IO_MUTEX_UNLOCK((__stream)->__lock)
 
 #define __STDIO_ALWAYS_THREADLOCK_CANCEL_UNSAFE(__stream)			\
-        __UCLIBC_MUTEX_LOCK_CANCEL_UNSAFE((__stream)->__lock)
+        __UCLIBC_IO_MUTEX_LOCK_CANCEL_UNSAFE((__stream)->__lock)
 
 #define __STDIO_ALWAYS_THREADTRYLOCK_CANCEL_UNSAFE(__stream)			\
-        __UCLIBC_MUTEX_TRYLOCK_CANCEL_UNSAFE((__stream)->__lock)
+        __UCLIBC_IO_MUTEX_TRYLOCK_CANCEL_UNSAFE((__stream)->__lock)
 
 #define __STDIO_ALWAYS_THREADUNLOCK_CANCEL_UNSAFE(__stream)			\
-        __UCLIBC_MUTEX_UNLOCK_CANCEL_UNSAFE((__stream)->__lock)
+        __UCLIBC_IO_MUTEX_UNLOCK_CANCEL_UNSAFE((__stream)->__lock)
 
 #ifdef __UCLIBC_HAS_THREADS__
 #define __STDIO_SET_USER_LOCKING(__stream)	((__stream)->__user_locking = 1)
@@ -161,6 +161,14 @@
 #define __STDIO_SET_USER_LOCKING(__stream)		((void)0)
 #endif
 
+#ifdef __UCLIBC_HAS_THREADS__
+#ifdef __USE_STDIO_FUTEXES__
+#define STDIO_INIT_MUTEX(M) _IO_lock_init(M)
+#else
+#define STDIO_INIT_MUTEX(M) __stdio_init_mutex(& M)
+#endif
+#endif
+
 /**********************************************************************/
 
 #define __STDIO_IOFBF 0		/* Fully buffered.  */
@@ -275,7 +283,7 @@ struct __STDIO_FILE_STRUCT {
 #endif
 #ifdef __UCLIBC_HAS_THREADS__
 	int __user_locking;
-	__UCLIBC_MUTEX(__lock);
+	__UCLIBC_IO_MUTEX(__lock);
 #endif
 /* Everything after this is unimplemented... and may be trashed. */
 #if __STDIO_BUILTIN_BUF_SIZE > 0
@@ -351,9 +359,9 @@ extern void _stdio_term(void) attribute_hidden;
 extern struct __STDIO_FILE_STRUCT *_stdio_openlist;
 
 #ifdef __UCLIBC_HAS_THREADS__
-__UCLIBC_MUTEX_EXTERN(_stdio_openlist_add_lock);
+__UCLIBC_IO_MUTEX_EXTERN(_stdio_openlist_add_lock);
 #ifdef __STDIO_BUFFERS
-__UCLIBC_MUTEX_EXTERN(_stdio_openlist_del_lock);
+__UCLIBC_IO_MUTEX_EXTERN(_stdio_openlist_del_lock);
 extern volatile int _stdio_openlist_use_count; /* _stdio_openlist_del_lock */
 extern int _stdio_openlist_del_count; /* _stdio_openlist_del_lock */
 #endif
diff --git a/libc/sysdeps/linux/common/fsync.c b/libc/sysdeps/linux/common/fsync.c
index 774efc9ce..711811f23 100644
--- a/libc/sysdeps/linux/common/fsync.c
+++ b/libc/sysdeps/linux/common/fsync.c
@@ -10,9 +10,28 @@
 #include <sys/syscall.h>
 #include <unistd.h>
 
-#ifdef __LINUXTHREADS_OLD__
-extern __typeof(fsync) weak_function fsync;
-strong_alias(fsync,__libc_fsync)
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+#include "sysdep-cancel.h"
+#else
+#define SINGLE_THREAD_P 1
 #endif
 
-_syscall1(int, fsync, int, fd)
+#define __NR___syscall_fsync __NR_fsync
+static inline _syscall1(int, __syscall_fsync, int, fd)
+
+extern __typeof(fsync) __libc_fsync;
+
+int __libc_fsync(int fd)
+{
+	if (SINGLE_THREAD_P)
+		return __syscall_fsync(fd);
+
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+	int oldtype = LIBC_CANCEL_ASYNC ();
+	int result = __syscall_fsync(fd);
+	LIBC_CANCEL_RESET (oldtype);
+	return result;
+#endif
+}
+
+weak_alias(__libc_fsync, fsync)
diff --git a/libc/sysdeps/linux/common/ioctl.c b/libc/sysdeps/linux/common/ioctl.c
index 7ac8f16c2..f2f0f539a 100644
--- a/libc/sysdeps/linux/common/ioctl.c
+++ b/libc/sysdeps/linux/common/ioctl.c
@@ -11,20 +11,36 @@
 #include <stdarg.h>
 #include <sys/ioctl.h>
 
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+#include <sysdep-cancel.h>
+#else
+#define SINGLE_THREAD_P 1
+#endif
+
+libc_hidden_proto(ioctl)
 
 #define __NR___syscall_ioctl __NR_ioctl
 static __always_inline
-_syscall3(int, __syscall_ioctl, int, fd, int, request, void *, arg)
+_syscall3(int, __syscall_ioctl, int, fd, unsigned long int, request, void *, arg)
 
 int ioctl(int fd, unsigned long int request, ...)
 {
-    void *arg;
-    va_list list;
+	void *arg;
+	va_list list;
+
+	va_start(list, request);
+	arg = va_arg(list, void *);
+
+	va_end(list);
 
-    va_start(list, request);
-    arg = va_arg(list, void *);
-    va_end(list);
+	if (SINGLE_THREAD_P)
+		return __syscall_ioctl(fd, request, arg);
 
-    return __syscall_ioctl(fd, request, arg);
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+	int oldtype = LIBC_CANCEL_ASYNC ();
+	int result = __syscall_ioctl(fd, request, arg);
+	LIBC_CANCEL_RESET (oldtype);
+	return result;
+#endif
 }
 libc_hidden_def(ioctl)
diff --git a/libc/sysdeps/linux/common/libgcc_s.h b/libc/sysdeps/linux/common/libgcc_s.h
new file mode 100644
index 000000000..e74a1034c
--- /dev/null
+++ b/libc/sysdeps/linux/common/libgcc_s.h
@@ -0,0 +1,2 @@
+/* Name of libgcc_s library provided by gcc.  */
+#define LIBGCC_S_SO "libgcc_s.so.1"
diff --git a/libc/sysdeps/linux/common/msync.c b/libc/sysdeps/linux/common/msync.c
index 7a46f0c32..2629bd4aa 100644
--- a/libc/sysdeps/linux/common/msync.c
+++ b/libc/sysdeps/linux/common/msync.c
@@ -9,16 +9,33 @@
 
 #include <sys/syscall.h>
 #include <unistd.h>
+#include <sys/mman.h>
 
-#if defined __NR_msync && defined __ARCH_USE_MMU__
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+#include <sysdep-cancel.h>
+#else
+#define SINGLE_THREAD_P 1
+#endif
 
-#include <sys/mman.h>
+#define __NR___syscall_msync __NR_msync
+static __always_inline _syscall3(int, __syscall_msync, void *, addr, size_t, length,
+						int, flags)
 
-#ifdef __LINUXTHREADS_OLD__
-extern __typeof(msync) weak_function msync;
-strong_alias(msync,__libc_msync)
+extern __typeof(msync) __libc_msync;
+int __libc_msync(void * addr, size_t length, int flags)
+{
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+	int oldtype, result;
 #endif
 
-_syscall3(int, msync, void *, addr, size_t, length, int, flags)
+	if (SINGLE_THREAD_P)
+		return __syscall_msync(addr, length, flags);
 
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+	oldtype = LIBC_CANCEL_ASYNC ();
+	result = __syscall_msync(addr, length, flags);
+	LIBC_CANCEL_RESET (oldtype);
+	return result;
 #endif
+}
+weak_alias(__libc_msync,msync)
diff --git a/libc/sysdeps/linux/common/nanosleep.c b/libc/sysdeps/linux/common/nanosleep.c
index 0849127db..0be59c511 100644
--- a/libc/sysdeps/linux/common/nanosleep.c
+++ b/libc/sysdeps/linux/common/nanosleep.c
@@ -10,13 +10,32 @@
 #include <sys/syscall.h>
 #include <time.h>
 
-#if defined __USE_POSIX199309 && defined __NR_nanosleep
-_syscall2(int, nanosleep, const struct timespec *, req,
-		  struct timespec *, rem)
-#ifndef __LINUXTHREADS_OLD__
-libc_hidden_def(nanosleep)
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+#include <sysdep-cancel.h>
+#include <pthreadP.h>
 #else
-libc_hidden_weak(nanosleep)
-strong_alias(nanosleep,__libc_nanosleep)
+#define SINGLE_THREAD_P 1
 #endif
+
+#define __NR___syscall_nanosleep __NR_nanosleep
+static inline _syscall2(int, __syscall_nanosleep, const struct timespec *, req,
+						struct timespec *, rem);
+
+extern __typeof(nanosleep) __libc_nanosleep;
+
+int __libc_nanosleep(const struct timespec *req, struct timespec *rem)
+{
+	if (SINGLE_THREAD_P)
+		return __syscall_nanosleep(req, rem);
+
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+	int oldtype = LIBC_CANCEL_ASYNC ();
+	int result = __syscall_nanosleep(req, rem);
+	LIBC_CANCEL_RESET (oldtype);
+	return result;
 #endif
+}
+
+libc_hidden_proto(nanosleep)
+weak_alias(__libc_nanosleep,nanosleep)
+libc_hidden_weak(nanosleep)
diff --git a/libc/sysdeps/linux/common/not-cancel.h b/libc/sysdeps/linux/common/not-cancel.h
new file mode 100644
index 000000000..9418417b4
--- /dev/null
+++ b/libc/sysdeps/linux/common/not-cancel.h
@@ -0,0 +1,60 @@
+/* Uncancelable versions of cancelable interfaces.  Linux version.
+   Copyright (C) 2003 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sysdep.h>
+
+/* Uncancelable open.  */
+#define open_not_cancel(name, flags, mode) \
+   INLINE_SYSCALL (open, 3, (const char *) (name), (flags), (mode))
+#define open_not_cancel_2(name, flags) \
+   INLINE_SYSCALL (open, 2, (const char *) (name), (flags))
+
+/* Uncancelable close.  */
+#define close_not_cancel(fd) \
+  INLINE_SYSCALL (close, 1, fd)
+#define close_not_cancel_no_status(fd) \
+  (void) ({ INTERNAL_SYSCALL_DECL (err);				      \
+	    INTERNAL_SYSCALL (close, err, 1, (fd)); })
+
+/* Uncancelable read.  */
+#define read_not_cancel(fd, buf, n) \
+  INLINE_SYSCALL (read, 3, (fd), (buf), (n))
+
+/* Uncancelable write.  */
+#define write_not_cancel(fd, buf, n) \
+  INLINE_SYSCALL (write, 3, (fd), (buf), (n))
+
+/* Uncancelable writev.  */
+#define writev_not_cancel_no_status(fd, iov, n) \
+  (void) ({ INTERNAL_SYSCALL_DECL (err);				      \
+	    INTERNAL_SYSCALL (writev, err, 3, (fd), (iov), (n)); })
+
+/* Uncancelable fcntl.  */
+#define fcntl_not_cancel(fd, cmd, val) \
+  __fcntl_nocancel (fd, cmd, val)
+
+/* Uncancelable waitpid.  */
+#ifdef __NR_waitpid
+# define waitpid_not_cancel(pid, stat_loc, options) \
+  INLINE_SYSCALL (waitpid, 3, pid, stat_loc, options)
+#else
+# define waitpid_not_cancel(pid, stat_loc, options) \
+  INLINE_SYSCALL (wait4, 4, pid, stat_loc, options, NULL)
+#endif
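not-cancel.h exists so that libc-internal I/O never turns into a cancellation point. A sketch of the intended use, under the assumption that the caller must not be cancelled mid-operation (the file name is purely illustrative):

#include <fcntl.h>
#include "not-cancel.h"

static int read_one_flag (void)
{
	int fd = open_not_cancel_2 ("/etc/hypothetical.conf", O_RDONLY);
	char c = '0';
	if (fd < 0)
		return -1;
	read_not_cancel (fd, &c, 1);		/* never a cancellation point */
	close_not_cancel_no_status (fd);	/* close result deliberately ignored */
	return c == '1';
}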
diff --git a/libc/sysdeps/linux/common/open64.c b/libc/sysdeps/linux/common/open64.c
index cfe471c64..c1f5400b8 100644
--- a/libc/sysdeps/linux/common/open64.c
+++ b/libc/sysdeps/linux/common/open64.c
@@ -7,6 +7,10 @@
 #include <features.h>
 #include <fcntl.h>
 #include <stdarg.h>
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+#include <errno.h>
+#include <sysdep-cancel.h>
+#endif
 
 #ifdef __UCLIBC_HAS_LFS__
 
@@ -28,7 +32,20 @@ int open64 (const char *file, int oflag, ...)
 	va_end (arg);
     }
 
-    return open(file, oflag | O_LARGEFILE, mode);
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+  if (SINGLE_THREAD_P)
+    return INLINE_SYSCALL (open, 3, file, oflag | O_LARGEFILE, mode);
+
+  int oldtype = LIBC_CANCEL_ASYNC ();
+
+  int result = INLINE_SYSCALL (open, 3, file, oflag | O_LARGEFILE, mode);
+
+  LIBC_CANCEL_RESET (oldtype);
+
+  return result;
+#else
+  return open(file, oflag | O_LARGEFILE, mode);
+#endif
 }
 #ifndef __LINUXTHREADS_OLD__
 libc_hidden_def(open64)
diff --git a/libc/sysdeps/linux/common/pause.c b/libc/sysdeps/linux/common/pause.c
index 19ba30706..132ffa856 100644
--- a/libc/sysdeps/linux/common/pause.c
+++ b/libc/sysdeps/linux/common/pause.c
@@ -10,18 +10,31 @@
 #define __UCLIBC_HIDE_DEPRECATED__
 #include <sys/syscall.h>
 #include <unistd.h>
-#include <signal.h>
 
-#ifdef __LINUXTHREADS_OLD__
-extern __typeof(pause) weak_function pause;
-strong_alias(pause, __libc_pause)
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+#include <sysdep-cancel.h>
 #endif
 
-#ifdef __NR_pause
-_syscall0(int, pause)
-#else
-int pause(void)
+#include <signal.h>
+
+/* Suspend the process until a signal arrives.
+   This always returns -1 and sets errno to EINTR.  */
+int
+__libc_pause (void)
 {
-	return __sigpause(sigblock(0), 0);
+  sigset_t set;
+
+  __sigemptyset (&set);
+  sigprocmask (SIG_BLOCK, NULL, &set);
+
+  /* pause is a cancellation point, but so is sigsuspend.
+     So no need for anything special here.  */
+
+  return sigsuspend (&set);
 }
+weak_alias (__libc_pause, pause)
+
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+LIBC_CANCEL_HANDLED ();		/* sigsuspend handles our cancellation.  */
 #endif
+
diff --git a/libc/sysdeps/linux/common/poll.c b/libc/sysdeps/linux/common/poll.c
index 52f6c76b4..3895e0da7 100644
--- a/libc/sysdeps/linux/common/poll.c
+++ b/libc/sysdeps/linux/common/poll.c
@@ -21,30 +21,33 @@
 #include <sys/poll.h>
 #include <bits/kernel-features.h>
 
-#if defined __ASSUME_POLL_SYSCALL && defined __NR_poll
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+#include <sysdep-cancel.h>
+#else
+#define SINGLE_THREAD_P 1
+#endif
 
-_syscall3(int, poll, struct pollfd *, fds,
-	unsigned long int, nfds, int, timeout)
+libc_hidden_proto(poll)
 
-#elif defined(__NR_ppoll) && defined __UCLIBC_LINUX_SPECIFIC__
+#if defined __ASSUME_POLL_SYSCALL && defined __NR_poll
+
+#define __NR___syscall_poll __NR_poll
+static inline _syscall3(int, __syscall_poll, struct pollfd *, fds,
+			unsigned long int, nfds, int, timeout);
 
 int poll(struct pollfd *fds, nfds_t nfds, int timeout)
 {
-	struct timespec *ts = NULL, tval;
-	if (timeout > 0) {
-		tval.tv_sec = timeout / 1000;
-		tval.tv_nsec = (timeout % 1000) * 1000000;
-		ts = &tval;
-	} else if (timeout == 0) {
-		tval.tv_sec = 0;
-		tval.tv_nsec = 0;
-		ts = &tval;
-	}
-	return ppoll(fds, nfds, ts, NULL);
+    if (SINGLE_THREAD_P)
+	return __syscall_poll(fds, nfds, timeout);
+
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+    int oldtype = LIBC_CANCEL_ASYNC ();
+    int result = __syscall_poll(fds, nfds, timeout);
+    LIBC_CANCEL_RESET (oldtype);
+    return result;
+#endif
 }
-
-#else
-/* ugh, this arch lacks poll, so we need to emulate this crap ... */
+#else /* !__NR_poll */
 
 #include <alloca.h>
 #include <sys/types.h>
@@ -54,6 +57,9 @@ int poll(struct pollfd *fds, nfds_t nfds, int timeout)
 #include <sys/param.h>
 #include <unistd.h>
 
+libc_hidden_proto(getdtablesize)
+libc_hidden_proto(select)
+
 /* uClinux 2.0 doesn't have poll, emulate it using select */
 
 /* Poll the file descriptors described by the NFDS structures starting at
@@ -221,10 +227,4 @@ int poll(struct pollfd *fds, nfds_t nfds, int timeout)
 }
 
 #endif
-
-#ifndef __LINUXTHREADS_OLD__
 libc_hidden_def(poll)
-#else
-libc_hidden_weak(poll)
-strong_alias(poll,__libc_poll)
-#endif
diff --git a/libc/sysdeps/linux/common/pselect.c b/libc/sysdeps/linux/common/pselect.c
index 63ab0dbb1..7e93537dd 100644
--- a/libc/sysdeps/linux/common/pselect.c
+++ b/libc/sysdeps/linux/common/pselect.c
@@ -22,9 +22,12 @@
 #include <stddef.h>	/* For NULL.  */
 #include <sys/time.h>
 #include <sys/select.h>
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+#include <sysdep-cancel.h>
+#endif
 
-extern __typeof(pselect) __libc_pselect;
-
+libc_hidden_proto(sigprocmask)
+libc_hidden_proto(select)
 
 /* Check the first NFDS descriptors each in READFDS (if not NULL) for read
@@ -33,8 +36,13 @@ extern __typeof(pselect) __libc_pselect;
    after waiting the interval specified therein.  Additionally set the sigmask
    SIGMASK for this call.  Returns the number of ready descriptors, or -1 for
    errors.  */
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+static int
+__pselect (int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
+#else
 int
-__libc_pselect (int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
+pselect (int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
+#endif
 	   const struct timespec *timeout, const sigset_t *sigmask)
 {
   struct timeval tval;
@@ -64,4 +72,23 @@ __libc_pselect (int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
   return retval;
 }
 
-weak_alias(__libc_pselect,pselect)
+
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+int
+pselect (int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
+	   const struct timespec *timeout, const sigset_t *sigmask)
+{
+	if (SINGLE_THREAD_P)
+		return __pselect (nfds, readfds, writefds, exceptfds,
+				  timeout, sigmask);
+
+	int oldtype = LIBC_CANCEL_ASYNC ();
+
+	int result = __pselect (nfds, readfds, writefds, exceptfds,
+				 timeout, sigmask);
+
+	LIBC_CANCEL_RESET (oldtype);
+
+	return result;
+}
+#endif
diff --git a/libc/sysdeps/linux/common/readv.c b/libc/sysdeps/linux/common/readv.c
index 3c40a0d8d..fce396d5f 100644
--- a/libc/sysdeps/linux/common/readv.c
+++ b/libc/sysdeps/linux/common/readv.c
@@ -2,7 +2,8 @@
 /*
  * readv() for uClibc
  *
- * Copyright (C) 2000-2006 Erik Andersen <andersen@uclibc.org>
+ * Copyright (C) 2006 by Steven J. Hill <sjhill@realitydiluted.com>
+ * Copyright (C) 2000-2004 by Erik Andersen <andersen@codepoet.org>
  *
  * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
  */
@@ -10,5 +11,40 @@
 #include <sys/syscall.h>
 #include <sys/uio.h>
 
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+#include <sysdep-cancel.h>
+
+/* We should deal with kernel which have a smaller UIO_FASTIOV as well
+   as a very big count.  */
+static ssize_t __readv (int fd, const struct iovec *vector, int count)
+{
+  ssize_t bytes_read;
+
+  bytes_read = INLINE_SYSCALL (readv, 3, fd, vector, count);
+
+  if (bytes_read >= 0 || errno != EINVAL || count <= UIO_FASTIOV)
+    return bytes_read;
+
+  /* glibc tries again, but we do not. */
+  //return __atomic_readv_replacement (fd, vector, count);
+
+  return -1;
+}
+
+ssize_t readv (int fd, const struct iovec *vector, int count)
+{
+  if (SINGLE_THREAD_P)
+    return __readv (fd, vector, count);
+
+  int oldtype = LIBC_CANCEL_ASYNC ();
+
+  int result = __readv (fd, vector, count);
+
+  LIBC_CANCEL_RESET (oldtype);
+
+  return result;
+}
+#else
 _syscall3(ssize_t, readv, int, filedes, const struct iovec *, vector,
 		  int, count)
+#endif
diff --git a/libc/sysdeps/linux/common/select.c b/libc/sysdeps/linux/common/select.c
index caff28d7c..0c2d91984 100644
--- a/libc/sysdeps/linux/common/select.c
+++ b/libc/sysdeps/linux/common/select.c
@@ -11,15 +11,21 @@
 #include <sys/select.h>
 #include <stdint.h>
 
-extern __typeof(select) __libc_select;
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+#include <sysdep-cancel.h>
+#else
+#define SINGLE_THREAD_P 1
+#endif
 
 #define USEC_PER_SEC 1000000L
 
+extern __typeof(select) __libc_select;
+
 #if !defined(__NR__newselect) && !defined(__NR_select) && defined __USE_XOPEN2K
 # define __NR___libc_pselect6 __NR_pselect6
 _syscall6(int, __libc_pselect6, int, n, fd_set *, readfds, fd_set *, writefds,
-	fd_set *, exceptfds, const struct timespec *, timeout,
-	const sigset_t *, sigmask)
+        fd_set *, exceptfds, const struct timespec *, timeout,
+        const sigset_t *, sigmask)
 
 int __libc_select(int n, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
                   struct timeval *timeout)
@@ -30,12 +36,12 @@ int __libc_select(int n, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
 		_ts.tv_sec = timeout->tv_sec;
 
 		/* GNU extension: allow for timespec values where the sub-sec
-		 * field is equal to or more than 1 second.  The kernel will
-		 * reject this on us, so take care of the time shift ourself.
-		 * Some applications (like readline and linphone) do this.
-		 * See 'clarification on select() type calls and invalid timeouts'
-		 * on the POSIX general list for more information.
-		 */
+		* field is equal to or more than 1 second.  The kernel will
+		* reject this on us, so take care of the time shift ourself.
+		* Some applications (like readline and linphone) do this.
+		* See 'clarification on select() type calls and invalid timeouts'
+		* on the POSIX general list for more information.
+		*/
 		usec = timeout->tv_usec;
 		if (usec >= USEC_PER_SEC) {
 			_ts.tv_sec += usec / USEC_PER_SEC;
@@ -46,18 +52,41 @@ int __libc_select(int n, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
 		ts = &_ts;
 	}
 
-	return __libc_pselect6(n, readfds, writefds, exceptfds, ts, 0);
+	if (SINGLE_THREAD_P)
+		return __libc_pselect6(n, readfds, writefds, exceptfds, ts, 0);
+
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+	int oldtype = LIBC_CANCEL_ASYNC ();
+	int result = __libc_pselect6(n, readfds, writefds, exceptfds, ts, 0);
+	LIBC_CANCEL_RESET (oldtype);
+	return result;
+#endif
+
 }
 
 #else
 
 #ifdef __NR__newselect
-# define __NR___libc_select __NR__newselect
+# define __NR___syscall_select __NR__newselect
 #else
-# define __NR___libc_select __NR_select
+# define __NR___syscall_select __NR_select
 #endif
 
-_syscall5(int, __libc_select, int, n, fd_set *, readfds, fd_set *, writefds,
-		  fd_set *, exceptfds, struct timeval *, timeout)
+
+_syscall5(int, __syscall_select, int, n, fd_set *, readfds,
+		fd_set *, writefds, fd_set *, exceptfds, struct timeval *, timeout);
+
+int __libc_select(int n, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
+                  struct timeval *timeout)
+{
+	if (SINGLE_THREAD_P)
+		return __syscall_select(n, readfds, writefds, exceptfds, timeout);
+
+#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+	int oldtype = LIBC_CANCEL_ASYNC ();
+	int result = __syscall_select(n, readfds, writefds, exceptfds, timeout);
+	LIBC_CANCEL_RESET (oldtype);
+	return result;
+#endif
+}
 
 #endif
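A worked example of the GNU-extension timeout normalization in __libc_select above: an over-full tv_usec field is folded into whole seconds before the converted value reaches pselect6 (the numbers are illustrative; USEC_PER_SEC is the 1000000L constant from the same file):

struct timeval tv = { .tv_sec = 1, .tv_usec = 2500000 };	/* 3.5 s in total */
struct timespec ts;
ts.tv_sec  = tv.tv_sec + tv.tv_usec / USEC_PER_SEC;	/* 1 + 2 = 3 */
ts.tv_nsec = (tv.tv_usec % USEC_PER_SEC) * 1000;	/* 500000 us -> 500000000 ns */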
-		((how != SIG_BLOCK) && (how != SIG_UNBLOCK) -		 && (how != SIG_SETMASK)) +#ifdef SIGCANCEL +	sigset_t local_newmask; + +	/* +	 * The only thing we have to make sure here is that SIGCANCEL and +	 * SIGSETXID are not blocked. +	 */ +	if (set != NULL && (__builtin_expect (__sigismember (set, SIGCANCEL), 0) +# ifdef SIGSETXID +		|| __builtin_expect (__sigismember (set, SIGSETXID), 0) +# endif +		)) +	{ +		local_newmask = *set; +		__sigdelset (&local_newmask, SIGCANCEL); +# ifdef SIGSETXID +		__sigdelset (&local_newmask, SIGSETXID);  # endif -		) { -		__set_errno(EINVAL); -		return -1; +		set = &local_newmask;  	} +#endif +  	return (__syscall_sigprocmask(how, set, oldset));  }  #endif diff --git a/libc/sysdeps/linux/common/sigsuspend.c b/libc/sysdeps/linux/common/sigsuspend.c index 3648e76b5..789eeda89 100644 --- a/libc/sysdeps/linux/common/sigsuspend.c +++ b/libc/sysdeps/linux/common/sigsuspend.c @@ -11,27 +11,49 @@  #if defined __USE_POSIX  #include <signal.h> +#undef sigsuspend -extern __typeof(sigsuspend) __libc_sigsuspend; +libc_hidden_proto(sigsuspend)  #ifdef __NR_rt_sigsuspend  # define __NR___rt_sigsuspend __NR_rt_sigsuspend -static __inline__ _syscall2(int, __rt_sigsuspend, const sigset_t *, mask, size_t, size) -int __libc_sigsuspend(const sigset_t * mask) +# ifdef __UCLIBC_HAS_THREADS_NATIVE__ +#  include <errno.h> +#  include <sysdep-cancel.h> + +/* Change the set of blocked signals to SET, +   wait until a signal arrives, and restore the set of blocked signals.  */ +int sigsuspend (const sigset_t *set) +{ +	if (SINGLE_THREAD_P) +		return INLINE_SYSCALL (rt_sigsuspend, 2, set, _NSIG / 8); + +	int oldtype = LIBC_CANCEL_ASYNC (); + +	int result = INLINE_SYSCALL (rt_sigsuspend, 2, set, _NSIG / 8); + +	LIBC_CANCEL_RESET (oldtype); + +	return result; +} +# else +static inline _syscall2(int, __rt_sigsuspend, const sigset_t *, mask, size_t, size); + +int sigsuspend(const sigset_t * mask)  {  	return __rt_sigsuspend(mask, _NSIG / 8);  } +# endif  #else  # define __NR___syscall_sigsuspend __NR_sigsuspend  static __inline__ _syscall3(int, __syscall_sigsuspend, int, a, unsigned long int, b,  		  unsigned long int, c) -int __libc_sigsuspend(const sigset_t * set) +int sigsuspend(const sigset_t * set)  {  	return __syscall_sigsuspend(0, 0, set->__val[0]);  }  #endif -weak_alias(__libc_sigsuspend,sigsuspend) -libc_hidden_weak(sigsuspend) +libc_hidden_def(sigsuspend)  #endif diff --git a/libc/sysdeps/linux/common/sysdep.h b/libc/sysdeps/linux/common/sysdep.h index cd5b2f1e3..dae74d7c5 100644 --- a/libc/sysdeps/linux/common/sysdep.h +++ b/libc/sysdeps/linux/common/sysdep.h @@ -137,3 +137,23 @@  # endif  #endif /* __ASSEMBLER__ */ + +/* Values used for encoding parameter of cfi_personality and cfi_lsda.  */ +#define DW_EH_PE_absptr		0x00 +#define DW_EH_PE_omit		0xff +#define DW_EH_PE_uleb128	0x01 +#define DW_EH_PE_udata2		0x02 +#define DW_EH_PE_udata4		0x03 +#define DW_EH_PE_udata8		0x04 +#define DW_EH_PE_sleb128	0x09 +#define DW_EH_PE_sdata2		0x0a +#define DW_EH_PE_sdata4		0x0b +#define DW_EH_PE_sdata8		0x0c +#define DW_EH_PE_signed		0x08 +#define DW_EH_PE_pcrel		0x10 +#define DW_EH_PE_textrel	0x20 +#define DW_EH_PE_datarel	0x30 +#define DW_EH_PE_funcrel	0x40 +#define DW_EH_PE_aligned	0x50 +#define DW_EH_PE_indirect	0x80 + diff --git a/libc/sysdeps/linux/common/wait.c b/libc/sysdeps/linux/common/wait.c index b16495314..d4b79bd37 100644 --- a/libc/sysdeps/linux/common/wait.c +++ b/libc/sysdeps/linux/common/wait.c @@ -1,23 +1,43 @@  /* + * Copyright (C) 2006 Steven J. 
Hill <sjhill@realitydiluted.com>   * Copyright (C) 2000-2006 Erik Andersen <andersen@uclibc.org>   *   * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.   */ -  #include <stdlib.h>  #include <syscall.h>  #include <sys/types.h>  #include <sys/wait.h>  #include <sys/resource.h> -#ifdef __LINUXTHREADS_OLD__ -extern __typeof(wait) weak_function wait; -strong_alias(wait,__libc_wait) -#endif +/* Wait for a child to die.  When one does, put its status in *STAT_LOC + * and return its process ID.  For errors, return (pid_t) -1.  */ +#ifdef __UCLIBC_HAS_THREADS_NATIVE__ +#include <errno.h> +#include <sysdep-cancel.h> + +pid_t attribute_hidden +__libc_wait (__WAIT_STATUS_DEFN stat_loc) +{ +  if (SINGLE_THREAD_P) +    return INLINE_SYSCALL (wait4, 4, WAIT_ANY, stat_loc, 0, +			   (struct rusage *) NULL); +  int oldtype = LIBC_CANCEL_ASYNC (); + +  pid_t result = INLINE_SYSCALL (wait4, 4, WAIT_ANY, stat_loc, 0, +				 (struct rusage *) NULL); + +  LIBC_CANCEL_RESET (oldtype); + +  return result; +} +#else  /* Wait for a child to die.  When one does, put its status in *STAT_LOC   * and return its process ID.  For errors, return (pid_t) -1.  */ -__pid_t wait(__WAIT_STATUS_DEFN stat_loc) +__pid_t __libc_wait (__WAIT_STATUS_DEFN stat_loc)  { -	return wait4(WAIT_ANY, stat_loc, 0, NULL); +      return wait4 (WAIT_ANY, stat_loc, 0, (struct rusage *) NULL);  } +#endif +weak_alias(__libc_wait,wait) diff --git a/libc/sysdeps/linux/common/waitpid.c b/libc/sysdeps/linux/common/waitpid.c index e46499377..d0437194d 100644 --- a/libc/sysdeps/linux/common/waitpid.c +++ b/libc/sysdeps/linux/common/waitpid.c @@ -1,5 +1,6 @@  /* vi: set sw=4 ts=4: */  /* + * Copyright (C) 2006 Steven J. Hill <sjhill@realitydiluted.com>   * Copyright (C) 2000-2006 Erik Andersen <andersen@uclibc.org>   *   * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball. @@ -10,13 +11,27 @@  #include <sys/wait.h>  #include <sys/resource.h> -__pid_t waitpid(__pid_t pid, int *wait_stat, int options) +#ifdef __UCLIBC_HAS_THREADS_NATIVE__ +#include "sysdep-cancel.h" +#else +#define SINGLE_THREAD_P 1 +#endif + +libc_hidden_proto(wait4) + +extern __typeof(waitpid) __libc_waitpid; +__pid_t __libc_waitpid(__pid_t pid, int *wait_stat, int options)  { -	return wait4(pid, wait_stat, options, NULL); +	if (SINGLE_THREAD_P) +		return wait4(pid, wait_stat, options, NULL); + +#ifdef __UCLIBC_HAS_THREADS_NATIVE__ +	int oldtype = LIBC_CANCEL_ASYNC (); +	int result = wait4(pid, wait_stat, options, NULL); +	LIBC_CANCEL_RESET (oldtype); +	return result; +#endif  } -#ifndef __LINUXTHREADS_OLD__ -libc_hidden_def(waitpid) -#else +libc_hidden_proto(waitpid) +weak_alias(__libc_waitpid,waitpid)  libc_hidden_weak(waitpid) -strong_alias(waitpid,__libc_waitpid) -#endif diff --git a/libc/sysdeps/linux/common/writev.c b/libc/sysdeps/linux/common/writev.c index 99de7e43d..bd0e4077d 100644 --- a/libc/sysdeps/linux/common/writev.c +++ b/libc/sysdeps/linux/common/writev.c @@ -10,5 +10,41 @@  #include <sys/syscall.h>  #include <sys/uio.h> +#ifdef __UCLIBC_HAS_THREADS_NATIVE__ +#include <errno.h> +#include <sysdep-cancel.h> + +/* We should deal with kernels which have a smaller UIO_FASTIOV as well +   as a very big count.  */ +static ssize_t __writev (int fd, const struct iovec *vector, int count) +{ +  ssize_t bytes_written; + +  bytes_written = INLINE_SYSCALL (writev, 3, fd, vector, count); + +  if (bytes_written >= 0 || errno != EINVAL || count <= UIO_FASTIOV) +    return bytes_written; + +  /* glibc tries again, but we do not.
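For contrast, a hedged sketch of the retry that glibc performs and this code declines: on EINVAL with an oversized vector, split the request into UIO_FASTIOV-sized chunks. writev_chunked is a hypothetical helper, and unlike glibc's buffered replacement it gives up writev's atomicity by issuing several smaller writes:

#include <sys/uio.h>
#include <errno.h>

static ssize_t writev_chunked(int fd, const struct iovec *vec, int count)
{
	ssize_t total = 0;
	while (count > 0) {
		/* Never hand the kernel more than UIO_FASTIOV entries at once. */
		int n = count > UIO_FASTIOV ? UIO_FASTIOV : count;
		ssize_t r = writev(fd, vec, n);
		if (r < 0)
			return total ? total : -1;
		total += r;
		vec += n;
		count -= n;
	}
	return total;
}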
*/ +  /* return __atomic_writev_replacement (fd, vector, count); */ + +  return -1; +} + +ssize_t writev (int fd, const struct iovec *vector, int count) +{ +  if (SINGLE_THREAD_P) +    return __writev (fd, vector, count); + +  int oldtype = LIBC_CANCEL_ASYNC (); + +  ssize_t result = __writev (fd, vector, count); + +  LIBC_CANCEL_RESET (oldtype); + +  return result; +} +#else  _syscall3(ssize_t, writev, int, filedes, const struct iovec *, vector,  		  int, count) +#endif diff --git a/libc/sysdeps/linux/i386/Makefile.arch b/libc/sysdeps/linux/i386/Makefile.arch index 2bf2b7607..668cca742 100644 --- a/libc/sysdeps/linux/i386/Makefile.arch +++ b/libc/sysdeps/linux/i386/Makefile.arch @@ -5,8 +5,17 @@  # Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.  # -CSRC := brk.c sigaction.c __syscall_error.c +CSRC := brk.c __syscall_error.c + +ifneq ($(UCLIBC_HAS_THREADS_NATIVE),y) +CSRC += sigaction.c +endif  SSRC := \ -	__longjmp.S vfork.S clone.S setjmp.S bsd-setjmp.S bsd-_setjmp.S \ +	__longjmp.S setjmp.S bsd-setjmp.S bsd-_setjmp.S \  	sync_file_range.S syscall.S mmap.S mmap64.S posix_fadvise64.S + + +ifneq ($(UCLIBC_HAS_THREADS_NATIVE),y) +SSRC += vfork.S clone.S +endif diff --git a/libc/sysdeps/linux/i386/bits/syscalls.h b/libc/sysdeps/linux/i386/bits/syscalls.h index 0427d91cd..9184bd6c3 100644 --- a/libc/sysdeps/linux/i386/bits/syscalls.h +++ b/libc/sysdeps/linux/i386/bits/syscalls.h @@ -23,7 +23,7 @@  		"int	$0x80\n\t"                              \  		RESTOREARGS_##nr                                \  		: "=a" (resultvar)                              \ -		: "i" (name) ASMFMT_##nr(args) : "memory", "cc" \ +		: "g" (name) ASMFMT_##nr(args) : "memory", "cc" \  	); \  	(int) resultvar; \  }) diff --git a/libc/sysdeps/linux/i386/bits/uClibc_arch_features.h b/libc/sysdeps/linux/i386/bits/uClibc_arch_features.h index deeec03d5..536e9c155 100644 --- a/libc/sysdeps/linux/i386/bits/uClibc_arch_features.h +++ b/libc/sysdeps/linux/i386/bits/uClibc_arch_features.h @@ -37,7 +37,7 @@  #undef __UCLIBC_HAVE_ASM_GLOBAL_DOT_NAME__  /* define if target supports CFI pseudo ops */ -#undef __UCLIBC_HAVE_ASM_CFI_DIRECTIVES__ +#define __UCLIBC_HAVE_ASM_CFI_DIRECTIVES__  /* define if target supports IEEE signed zero floats */  #define __UCLIBC_HAVE_SIGNED_ZERO__ diff --git a/libc/sysdeps/linux/i386/clone.S b/libc/sysdeps/linux/i386/clone.S index 14fc25ca1..a7de3fe27 100644 --- a/libc/sysdeps/linux/i386/clone.S +++ b/libc/sysdeps/linux/i386/clone.S @@ -79,7 +79,10 @@ clone:  	movl	%eax,8(%ecx)  	/* Don't leak any information.  */  	movl	$0,4(%ecx) +#ifndef RESET_PID  	movl	$0,(%ecx) +#endif +  	/* Do the system call */  	pushl	%ebx @@ -90,6 +93,10 @@ clone:  	movl	FLAGS+12(%esp),%ebx  	movl	CTID+12(%esp),%edi  	movl	$__NR_clone,%eax +#ifdef RESET_PID +	/* Remember the flag value.  */ +	movl	%ebx, (%ecx) +#endif  	int	$0x80  	popl	%edi  	popl	%esi @@ -121,3 +128,4 @@ __error:  	jmp __syscall_error  .size clone,.-clone +weak_alias(clone, __clone) diff --git a/libc/sysdeps/linux/i386/sysdep.h b/libc/sysdeps/linux/i386/sysdep.h new file mode 100644 index 000000000..ff67e8a08 --- /dev/null +++ b/libc/sysdeps/linux/i386/sysdep.h @@ -0,0 +1,460 @@ +/* Copyright (C) 1992,1993,1995-2000,2002-2006,2007 +	Free Software Foundation, Inc. +   This file is part of the GNU C Library. +   Contributed by Ulrich Drepper, <drepper@gnu.org>, August 1995. 
+ +   The GNU C Library is free software; you can redistribute it and/or +   modify it under the terms of the GNU Lesser General Public +   License as published by the Free Software Foundation; either +   version 2.1 of the License, or (at your option) any later version. + +   The GNU C Library is distributed in the hope that it will be useful, +   but WITHOUT ANY WARRANTY; without even the implied warranty of +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU +   Lesser General Public License for more details. + +   You should have received a copy of the GNU Lesser General Public +   License along with the GNU C Library; if not, write to the Free +   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA +   02111-1307 USA.  */ + +#ifndef _LINUX_I386_SYSDEP_H +#define _LINUX_I386_SYSDEP_H 1 + +#include <sys/syscall.h> +#include <common/sysdep.h> + +#ifdef	__ASSEMBLER__ + +/* Syntactic details of assembler.  */ + +/* ELF uses byte-counts for .align, most others use log2 of count of bytes.  */ +#define ALIGNARG(log2) 1<<log2 +/* For ELF we need the `.type' directive to make shared libs work right.  */ +#define ASM_TYPE_DIRECTIVE(name,typearg) .type name,typearg; +#define ASM_SIZE_DIRECTIVE(name) .size name,.-name; + +/* In ELF C symbols are asm symbols.  */ +#undef	NO_UNDERSCORES +#define NO_UNDERSCORES + +/* Define an entry point visible from C. + +   There is currently a bug in gdb which prevents us from specifying +   incomplete stabs information.  Fake some entries here which specify +   the current source file.  */ +#define	ENTRY(name)							      \ +  STABS_CURRENT_FILE1("")						      \ +  STABS_CURRENT_FILE(name)						      \ +  ASM_GLOBAL_DIRECTIVE C_SYMBOL_NAME(name);				      \ +  ASM_TYPE_DIRECTIVE (C_SYMBOL_NAME(name),@function)			      \ +  .align ALIGNARG(4);							      \ +  STABS_FUN(name)							      \ +  C_LABEL(name)								      \ +  cfi_startproc;							      \ +  CALL_MCOUNT + +#undef	END +#define END(name)							      \ +  cfi_endproc;								      \ +  ASM_SIZE_DIRECTIVE(name)						      \ +  STABS_FUN_END(name) + +#ifdef HAVE_CPP_ASM_DEBUGINFO +/* Disable that goop, because we just pass -g through to the assembler +   and it generates proper line number information directly.  */ +# define STABS_CURRENT_FILE1(name) +# define STABS_CURRENT_FILE(name) +# define STABS_FUN(name) +# define STABS_FUN_END(name) +#else +/* Remove the following two lines once the gdb bug is fixed.  */ +#define STABS_CURRENT_FILE(name)					      \ +  STABS_CURRENT_FILE1 (#name) +#define STABS_CURRENT_FILE1(name)					      \ +  1: .stabs name,100,0,0,1b; +/* Emit stabs definition lines.  We use F(0,1) and define t(0,1) as `int', +   the same way gcc does it.  */ +#define STABS_FUN(name) STABS_FUN2(name, name##:F(0,1)) +#define STABS_FUN2(name, namestr)					      \ +  .stabs "int:t(0,1)=r(0,1);-2147483648;2147483647;",128,0,0,0;		      \ +  .stabs #namestr,36,0,0,name; +#define STABS_FUN_END(name)						      \ +  1: .stabs "",36,0,0,1b-name; +#endif + +/* If compiled for profiling, call `mcount' at the start of each function.  */ +#ifdef	PROF +/* The mcount code relies on a normal frame pointer being on the stack +   to locate our caller, so push one just for its benefit.  */ +#define CALL_MCOUNT \ +  pushl %ebp; cfi_adjust_cfa_offset (4); movl %esp, %ebp; \ +  cfi_def_cfa_register (ebp); call JUMPTARGET(mcount); \ +  popl %ebp; cfi_def_cfa (esp, 4); +#else +#define CALL_MCOUNT		/* Do nothing.  
*/ +#endif + +#ifdef	NO_UNDERSCORES +/* Since C identifiers are not normally prefixed with an underscore +   on this system, the asm identifier `syscall_error' intrudes on the +   C name space.  Make sure we use an innocuous name.  */ +#define	syscall_error	__syscall_error +#define mcount		_mcount +#endif + +#undef JUMPTARGET +#ifdef __PIC__ +#define JUMPTARGET(name)	name##@PLT +#define SYSCALL_PIC_SETUP \ +    pushl %ebx;								      \ +    cfi_adjust_cfa_offset (4);						      \ +    call 0f;								      \ +0:  popl %ebx;								      \ +    cfi_adjust_cfa_offset (-4);						      \ +    addl $_GLOBAL_OFFSET_TABLE_+[.-0b], %ebx; + + +# define SETUP_PIC_REG(reg) \ +  .ifndef __x86.get_pc_thunk.reg;					      \ +  .section .gnu.linkonce.t.__x86.get_pc_thunk.reg,"ax",@progbits;	      \ +  .globl __x86.get_pc_thunk.reg;					      \ +  .hidden __x86.get_pc_thunk.reg;					      \ +  .type __x86.get_pc_thunk.reg,@function;				      \ +__x86.get_pc_thunk.reg:						      \ +  movl (%esp), %e##reg;							      \ +  ret;									      \ +  .size __x86.get_pc_thunk.reg, . - __x86.get_pc_thunk.reg;		      \ +  .previous;								      \ +  .endif;								      \ +  call __x86.get_pc_thunk.reg + +# define LOAD_PIC_REG(reg) \ +  SETUP_PIC_REG(reg); addl $_GLOBAL_OFFSET_TABLE_, %e##reg + +#else +#define JUMPTARGET(name)	name +#define SYSCALL_PIC_SETUP	/* Nothing.  */ +#endif + +/* Local label name for asm code. */ +#ifndef L +#ifdef HAVE_ELF +#define L(name)		.L##name +#else +#define L(name)		name +#endif +#endif + +#endif	/* __ASSEMBLER__ */ + +#ifndef offsetof +# define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) +#endif + +/* For Linux we can use the system call table in the header file +	/usr/include/asm/unistd.h +   of the kernel.  But these symbols do not follow the SYS_* syntax +   so we have to redefine the `SYS_ify' macro here.  */ +#undef SYS_ify +#define SYS_ify(syscall_name)	__NR_##syscall_name + +#if defined USE_DL_SYSINFO \ +    && (!defined NOT_IN_libc || defined IS_IN_libpthread) +# define I386_USE_SYSENTER	1 +#else +# undef I386_USE_SYSENTER +#endif + +#ifdef __ASSEMBLER__ + +/* Linux uses a negative return value to indicate syscall errors, +   unlike most Unices, which use the condition codes' carry flag. + +   Since version 2.1 the return value of a system call might be +   negative even if the call succeeded.  E.g., the `lseek' system call +   might return a large offset.  Therefore we must no longer test +   for < 0, but test for a real error by making sure the value in %eax +   is a real error number.  Linus said he will make sure that no syscall +   returns a value in -1 .. -4095 as a valid result so we can safely +   test with -4095.  */ + +/* We don't want the label for the error handler to be global when we define +   it here.
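Stated in C, the check described above amounts to the following (check_syscall_result is an illustrative name only; the real test is the cmpl $-4095 emitted by the PSEUDO macro below):

static long check_syscall_result(long raw)
{
	/* -1 .. -4095 is reserved for negated errno values; anything else,
	   including "negative" results such as large lseek offsets, is a
	   valid return value. */
	if ((unsigned long) raw >= (unsigned long) -4095L)
		return -1L;	/* the error handler stores -raw into errno */
	return raw;
}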
*/ +#ifdef __PIC__ +# define SYSCALL_ERROR_LABEL 0f +#else +# define SYSCALL_ERROR_LABEL syscall_error +#endif + +#undef	PSEUDO +#define	PSEUDO(name, syscall_name, args)				      \ +  .text;								      \ +  ENTRY (name)								      \ +    DO_CALL (syscall_name, args);					      \ +    cmpl $-4095, %eax;							      \ +    jae SYSCALL_ERROR_LABEL;						      \ +  L(pseudo_end): + +#undef	PSEUDO_END +#define	PSEUDO_END(name)						      \ +  SYSCALL_ERROR_HANDLER							      \ +  END (name) + +#undef	PSEUDO_NOERRNO +#define	PSEUDO_NOERRNO(name, syscall_name, args)			      \ +  .text;								      \ +  ENTRY (name)								      \ +    DO_CALL (syscall_name, args) + +#undef	PSEUDO_END_NOERRNO +#define	PSEUDO_END_NOERRNO(name)					      \ +  END (name) + +#define ret_NOERRNO ret + +/* The function has to return the error code.  */ +#undef	PSEUDO_ERRVAL +#define	PSEUDO_ERRVAL(name, syscall_name, args) \ +  .text;								      \ +  ENTRY (name)								      \ +    DO_CALL (syscall_name, args);					      \ +    negl %eax + +#undef	PSEUDO_END_ERRVAL +#define	PSEUDO_END_ERRVAL(name) \ +  END (name) + +#define ret_ERRVAL ret + +#ifndef __PIC__ +# define SYSCALL_ERROR_HANDLER	/* Nothing here; code in sysdep.S is used.  */ +#else + +# ifdef RTLD_PRIVATE_ERRNO +#  define SYSCALL_ERROR_HANDLER						      \ +0:SETUP_PIC_REG(cx);							      \ +  addl $_GLOBAL_OFFSET_TABLE_, %ecx;					      \ +  xorl %edx, %edx;							      \ +  subl %eax, %edx;							      \ +  movl %edx, rtld_errno@GOTOFF(%ecx);					      \ +  orl $-1, %eax;							      \ +  jmp L(pseudo_end); + +# elif defined _LIBC_REENTRANT + +#  if USE___THREAD +#   ifndef NOT_IN_libc +#    define SYSCALL_ERROR_ERRNO __libc_errno +#   else +#    define SYSCALL_ERROR_ERRNO errno +#   endif +#   define SYSCALL_ERROR_HANDLER					      \ +0:SETUP_PIC_REG (cx);							      \ +  addl $_GLOBAL_OFFSET_TABLE_, %ecx;					      \ +  movl SYSCALL_ERROR_ERRNO@GOTNTPOFF(%ecx), %ecx;			      \ +  xorl %edx, %edx;							      \ +  subl %eax, %edx;							      \ +  SYSCALL_ERROR_HANDLER_TLS_STORE (%edx, %ecx);				      \ +  orl $-1, %eax;							      \ +  jmp L(pseudo_end); +#   ifndef NO_TLS_DIRECT_SEG_REFS +#    define SYSCALL_ERROR_HANDLER_TLS_STORE(src, destoff)		      \ +  movl src, %gs:(destoff) +#   else +#    define SYSCALL_ERROR_HANDLER_TLS_STORE(src, destoff)		      \ +  addl %gs:0, destoff;							      \ +  movl src, (destoff) +#   endif +#  else +#   define SYSCALL_ERROR_HANDLER					      \ +0:pushl %ebx;								      \ +  cfi_adjust_cfa_offset (4);						      \ +  cfi_rel_offset (ebx, 0);						      \ +  SETUP_PIC_REG (bx);							      \ +  addl $_GLOBAL_OFFSET_TABLE_, %ebx;					      \ +  xorl %edx, %edx;							      \ +  subl %eax, %edx;							      \ +  pushl %edx;								      \ +  cfi_adjust_cfa_offset (4);						      \ +  call __errno_location@PLT;					              \ +  popl %ecx;								      \ +  cfi_adjust_cfa_offset (-4);						      \ +  popl %ebx;								      \ +  cfi_adjust_cfa_offset (-4);						      \ +  cfi_restore (ebx);							      \ +  movl %ecx, (%eax);							      \ +  orl $-1, %eax;							      \ +  jmp L(pseudo_end); +/* A quick note: it is assumed that the call to `__errno_location' does +   not modify the stack!  */ +#  endif +# else +/* Store (- %eax) into errno through the GOT.  
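All of the handler variants above implement the same C-level behaviour and differ only in how they reach errno (rtld_errno, TLS through %gs, or the GOT). Roughly, as a sketch rather than the emitted code:

#include <errno.h>

static long syscall_error_sketch(long negated_err)
{
	errno = (int) -negated_err;	/* e.g. -EINVAL becomes EINVAL */
	return -1L;
}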
*/ +#  define SYSCALL_ERROR_HANDLER						      \ +0:SETUP_PIC_REG(cx);							      \ +  addl $_GLOBAL_OFFSET_TABLE_, %ecx;					      \ +  xorl %edx, %edx;							      \ +  subl %eax, %edx;							      \ +  movl errno@GOT(%ecx), %ecx;						      \ +  movl %edx, (%ecx);							      \ +  orl $-1, %eax;							      \ +  jmp L(pseudo_end); +# endif	/* _LIBC_REENTRANT */ +#endif	/* __PIC__ */ + + +/* The original calling convention for system calls on Linux/i386 is +   to use int $0x80.  */ +#ifdef I386_USE_SYSENTER +# ifdef SHARED +#  define ENTER_KERNEL call *%gs:SYSINFO_OFFSET +# else +#  define ENTER_KERNEL call *_dl_sysinfo +# endif +#else +# define ENTER_KERNEL int $0x80 +#endif + +/* Linux takes system call arguments in registers: + +	syscall number	%eax	     call-clobbered +	arg 1		%ebx	     call-saved +	arg 2		%ecx	     call-clobbered +	arg 3		%edx	     call-clobbered +	arg 4		%esi	     call-saved +	arg 5		%edi	     call-saved +	arg 6		%ebp	     call-saved + +   The stack layout upon entering the function is: + +	24(%esp)	Arg# 6 +	20(%esp)	Arg# 5 +	16(%esp)	Arg# 4 +	12(%esp)	Arg# 3 +	 8(%esp)	Arg# 2 +	 4(%esp)	Arg# 1 +	  (%esp)	Return address + +   (Of course a function with, say, 3 arguments does not have entries for +   arguments 4, 5, and 6.) + +   The following code tries hard to be optimal.  A general assumption +   (which is true according to the data books I have) is that + +	2 * xchg	is more expensive than	pushl + movl + popl + +   Besides this, a neat trick is used.  The calling conventions for Linux +   specify that among the registers used for parameters %ecx and %edx need +   not be saved.  Besides this, we may clobber these registers even when +   they are not used for parameter passing. + +   As a result one can see below that we save the content of the %ebx +   register in the %edx register when we have less than 3 arguments +   (2 * movl is less expensive than pushl + popl). + +   Second, unlike for the other registers, we don't save the content of +   %ecx and %edx when we have more than 1 and 2 arguments, respectively. + +   The code below might look a bit long but we have to take care of +   the pipelined processors (i586).  Here the `pushl' and `popl' +   instructions are marked as NP (not pairable) but the exception is +   two consecutive of these instructions.  This gives no penalty on +   other processors though.  */ + +#undef	DO_CALL +#define DO_CALL(syscall_name, args)			      		      \ +    PUSHARGS_##args							      \ +    DOARGS_##args							      \ +    movl $SYS_ify (syscall_name), %eax;					      \ +    ENTER_KERNEL							      \ +    POPARGS_##args + +#define PUSHARGS_0	/* No arguments to push.  */ +#define	DOARGS_0	/* No arguments to frob.  */ +#define	POPARGS_0	/* No arguments to pop.  */ +#define	_PUSHARGS_0	/* No arguments to push.  */ +#define _DOARGS_0(n)	/* No arguments to frob.  */ +#define	_POPARGS_0	/* No arguments to pop.
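A C illustration of the register assignment in the table above, as a hand-rolled 3-argument call. write_sketch is hypothetical; the real stubs are generated by DO_CALL below, which also saves the call-saved registers and may enter the kernel through sysenter. (The "b" constraint can clash with -fPIC on older compilers, which is exactly why the macros shuffle %ebx by hand.)

#include <sys/syscall.h>

static long write_sketch(int fd, const void *buf, unsigned long len)
{
	long ret;
	/* Arguments 1-3 in %ebx, %ecx, %edx; syscall number in %eax. */
	__asm__ volatile ("int $0x80"
			  : "=a" (ret)
			  : "0" (__NR_write), "b" (fd), "c" (buf), "d" (len)
			  : "memory");
	return ret;
}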
*/ + +#define PUSHARGS_1	movl %ebx, %edx; L(SAVEBX1): PUSHARGS_0 +#define	DOARGS_1	_DOARGS_1 (4) +#define	POPARGS_1	POPARGS_0; movl %edx, %ebx; L(RESTBX1): +#define	_PUSHARGS_1	pushl %ebx; cfi_adjust_cfa_offset (4); \ +			cfi_rel_offset (ebx, 0); L(PUSHBX1): _PUSHARGS_0 +#define _DOARGS_1(n)	movl n(%esp), %ebx; _DOARGS_0(n-4) +#define	_POPARGS_1	_POPARGS_0; popl %ebx; cfi_adjust_cfa_offset (-4); \ +			cfi_restore (ebx); L(POPBX1): + +#define PUSHARGS_2	PUSHARGS_1 +#define	DOARGS_2	_DOARGS_2 (8) +#define	POPARGS_2	POPARGS_1 +#define _PUSHARGS_2	_PUSHARGS_1 +#define	_DOARGS_2(n)	movl n(%esp), %ecx; _DOARGS_1 (n-4) +#define	_POPARGS_2	_POPARGS_1 + +#define PUSHARGS_3	_PUSHARGS_2 +#define DOARGS_3	_DOARGS_3 (16) +#define POPARGS_3	_POPARGS_3 +#define _PUSHARGS_3	_PUSHARGS_2 +#define _DOARGS_3(n)	movl n(%esp), %edx; _DOARGS_2 (n-4) +#define _POPARGS_3	_POPARGS_2 + +#define PUSHARGS_4	_PUSHARGS_4 +#define DOARGS_4	_DOARGS_4 (24) +#define POPARGS_4	_POPARGS_4 +#define _PUSHARGS_4	pushl %esi; cfi_adjust_cfa_offset (4); \ +			cfi_rel_offset (esi, 0); L(PUSHSI1): _PUSHARGS_3 +#define _DOARGS_4(n)	movl n(%esp), %esi; _DOARGS_3 (n-4) +#define _POPARGS_4	_POPARGS_3; popl %esi; cfi_adjust_cfa_offset (-4); \ +			cfi_restore (esi); L(POPSI1): + +#define PUSHARGS_5	_PUSHARGS_5 +#define DOARGS_5	_DOARGS_5 (32) +#define POPARGS_5	_POPARGS_5 +#define _PUSHARGS_5	pushl %edi; cfi_adjust_cfa_offset (4); \ +			cfi_rel_offset (edi, 0); L(PUSHDI1): _PUSHARGS_4 +#define _DOARGS_5(n)	movl n(%esp), %edi; _DOARGS_4 (n-4) +#define _POPARGS_5	_POPARGS_4; popl %edi; cfi_adjust_cfa_offset (-4); \ +			cfi_restore (edi); L(POPDI1): + +#define PUSHARGS_6	_PUSHARGS_6 +#define DOARGS_6	_DOARGS_6 (40) +#define POPARGS_6	_POPARGS_6 +#define _PUSHARGS_6	pushl %ebp; cfi_adjust_cfa_offset (4); \ +			cfi_rel_offset (ebp, 0); L(PUSHBP1): _PUSHARGS_5 +#define _DOARGS_6(n)	movl n(%esp), %ebp; _DOARGS_5 (n-4) +#define _POPARGS_6	_POPARGS_5; popl %ebp; cfi_adjust_cfa_offset (-4); \ +			cfi_restore (ebp); L(POPBP1): + +#endif	/* __ASSEMBLER__ */ + + +/* Pointer mangling support.  */ +#if defined NOT_IN_libc && defined IS_IN_rtld +/* We cannot use the thread descriptor because in ld.so we use setjmp +   earlier than the descriptor is initialized.  Using a global variable +   is too complicated here since we have no PC-relative addressing mode.  
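A C model of the mangling scheme defined just below, assuming the 32-bit i386 case, with guard standing in for the per-thread %gs:POINTER_GUARD value:

#include <stdint.h>

static uint32_t rotl32(uint32_t v, int n) { return (v << n) | (v >> (32 - n)); }
static uint32_t rotr32(uint32_t v, int n) { return (v >> n) | (v << (32 - n)); }

static uint32_t ptr_mangle(uint32_t p, uint32_t guard)
{
	return rotl32(p ^ guard, 9);		/* xorl; roll $9 */
}

static uint32_t ptr_demangle(uint32_t m, uint32_t guard)
{
	return rotr32(m, 9) ^ guard;		/* rorl $9; xorl */
}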
*/ +#else +# ifdef __ASSEMBLER__ +#  define PTR_MANGLE(reg)	xorl %gs:POINTER_GUARD, reg;		      \ +				roll $9, reg +#  define PTR_DEMANGLE(reg)	rorl $9, reg;				      \ +				xorl %gs:POINTER_GUARD, reg +# else +#  define PTR_MANGLE(var)	__asm__ ("xorl %%gs:%c2, %0\n"		      \ +				     "roll $9, %0"			      \ +				     : "=r" (var)			      \ +				     : "0" (var),			      \ +				       "i" (offsetof (tcbhead_t,	      \ +						      pointer_guard))) +#  define PTR_DEMANGLE(var)	__asm__ ("rorl $9, %0\n"			      \ +				     "xorl %%gs:%c2, %0"		      \ +				     : "=r" (var)			      \ +				     : "0" (var),			      \ +				       "i" (offsetof (tcbhead_t,	      \ +						      pointer_guard))) +# endif +#endif + +#endif /* linux/i386/sysdep.h */ diff --git a/libc/sysdeps/linux/i386/vfork.S b/libc/sysdeps/linux/i386/vfork.S index 8005ff1d2..c9db2f48c 100644 --- a/libc/sysdeps/linux/i386/vfork.S +++ b/libc/sysdeps/linux/i386/vfork.S @@ -18,9 +18,19 @@  __vfork:  	popl %ecx + +#ifdef SAVE_PID +	SAVE_PID +#endif +  	movl $__NR_vfork,%eax  	int $0x80  	pushl %ecx + +#ifdef RESTORE_PID +	RESTORE_PID +#endif +  	cmpl $-4095,%eax  	jae __syscall_error  	ret diff --git a/libc/sysdeps/linux/mips/Makefile.arch b/libc/sysdeps/linux/mips/Makefile.arch index 2570a6988..73e64991c 100644 --- a/libc/sysdeps/linux/mips/Makefile.arch +++ b/libc/sysdeps/linux/mips/Makefile.arch @@ -7,11 +7,15 @@  CSRC := \  	__longjmp.c  brk.c setjmp_aux.c mmap.c __syscall_error.c \ -	cacheflush.c pread_write.c sysmips.c _test_and_set.c sigaction.c \ +	cacheflush.c pread_write.c sysmips.c _test_and_set.c \  	readahead.c  ifeq ($(UCLIBC_HAS_ADVANCED_REALTIME),y) -        CSRC += posix_fadvise.c posix_fadvise64.c +CSRC += posix_fadvise.c posix_fadvise64.c +endif + +ifneq ($(UCLIBC_HAS_THREADS_NATIVE),y) +CSRC += sigaction.c  endif  SSRC := bsd-_setjmp.S bsd-setjmp.S setjmp.S clone.S syscall.S pipe.S diff --git a/libc/sysdeps/linux/mips/clone.S b/libc/sysdeps/linux/mips/clone.S index 15fa29558..a53d5c492 100644 --- a/libc/sysdeps/linux/mips/clone.S +++ b/libc/sysdeps/linux/mips/clone.S @@ -132,3 +132,4 @@ L(__thread_start):          jal             _exit  #endif  	.end  __thread_start +weak_alias(clone, __clone) diff --git a/libc/sysdeps/linux/mips/sys/asm.h b/libc/sysdeps/linux/mips/sys/asm.h index 79d143975..d424ed3b1 100644 --- a/libc/sysdeps/linux/mips/sys/asm.h +++ b/libc/sysdeps/linux/mips/sys/asm.h @@ -472,4 +472,20 @@ symbol		=	value  # define MTC0	dmtc0  #endif +/* The MIPS architectures do not have a uniform memory model.  Particular +   platforms may provide additional guarantees - for instance, the R4000 +   LL and SC instructions implicitly perform a SYNC, and the 4K promises +   strong ordering. + +   However, in the absence of those guarantees, we must assume weak ordering +   and SYNC explicitly where necessary. + +   Some obsolete MIPS processors may not support the SYNC instruction.  This +   applies to "true" MIPS I processors; most of the processors which compile +   using MIPS I implement parts of MIPS II.
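Where those guarantees are absent, the barrier reduces to an explicit SYNC; a sketch of what the MIPS_SYNC macro defined just below amounts to, wrapped for use from C:

static __inline__ void mips_sync_sketch(void)
{
	/* Order all earlier loads and stores before all later ones. */
	__asm__ __volatile__ ("sync" : : : "memory");
}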
*/ + +#ifndef MIPS_SYNC +# define MIPS_SYNC      sync +#endif +  #endif /* sys/asm.h */ diff --git a/libc/sysdeps/linux/mips/sys/regdef.h b/libc/sysdeps/linux/mips/sys/regdef.h index 9d2c4c1c4..2d94130af 100644 --- a/libc/sysdeps/linux/mips/sys/regdef.h +++ b/libc/sysdeps/linux/mips/sys/regdef.h @@ -20,6 +20,8 @@  #ifndef _SYS_REGDEF_H  #define _SYS_REGDEF_H +#include <sgidefs.h> +  /*   * Symbolic register names for 32 bit ABI   */ diff --git a/libc/sysdeps/linux/mips/syscall_error.S b/libc/sysdeps/linux/mips/syscall_error.S new file mode 100644 index 000000000..1e348ad4a --- /dev/null +++ b/libc/sysdeps/linux/mips/syscall_error.S @@ -0,0 +1,82 @@ +/* Copyright (C) 1992, 1993, 1994, 1997, 1998, 1999, 2000, 2002, 2003 +   Free Software Foundation, Inc. +   This file is part of the GNU C Library. +   Contributed by Brendan Kehoe (brendan@zen.org). + +   The GNU C Library is free software; you can redistribute it and/or +   modify it under the terms of the GNU Lesser General Public +   License as published by the Free Software Foundation; either +   version 2.1 of the License, or (at your option) any later version. + +   The GNU C Library is distributed in the hope that it will be useful, +   but WITHOUT ANY WARRANTY; without even the implied warranty of +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU +   Lesser General Public License for more details. + +   You should have received a copy of the GNU Lesser General Public +   License along with the GNU C Library; if not, write to the Free +   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA +   02111-1307 USA.  */ + +#include <sys/asm.h> +#include <sysdep.h> +#include <bits/errno.h> + +#ifdef __UCLIBC_HAS_THREADS__ + +LOCALSZ= 3 +FRAMESZ= (((NARGSAVE+LOCALSZ)*SZREG)+ALSZ)&ALMASK +RAOFF= FRAMESZ-(1*SZREG) +GPOFF= FRAMESZ-(2*SZREG) +V0OFF= FRAMESZ-(3*SZREG) + +ENTRY(__syscall_error) +#ifdef __PIC__ +	.set noat +	SETUP_GPX (AT) +	.set at +#endif +	PTR_SUBU sp, FRAMESZ +	.set noat +	SETUP_GPX64(GPOFF,AT) +	.set at +#ifdef __PIC__ +	SAVE_GP(GPOFF) +#endif +	REG_S	v0, V0OFF(sp) +	REG_S	ra, RAOFF(sp) + +	/* Find our per-thread errno address  */ +	jal	__errno_location + +	/* Store the error value.  */ +	REG_L	t0, V0OFF(sp) +	sw	t0, 0(v0) + +	/* And just kick back a -1.  */ +	REG_L	ra, RAOFF(sp) +	RESTORE_GP64 +	PTR_ADDU sp, FRAMESZ +	li	v0, -1 +	j	ra +	END(__syscall_error) + +#else /* __UCLIBC_HAS_THREADS__ */ + + +ENTRY(__syscall_error) +#ifdef __PIC__ +	SETUP_GPX (AT) +#endif +	SETUP_GPX64 (t9, AT) + +	/* Store it in errno... */ +	sw v0, errno + +	/* And just kick back a -1.  */ +	li v0, -1 + +	RESTORE_GP64 +	j ra +	END(__syscall_error) +#endif  /* __UCLIBC_HAS_THREADS__ */ diff --git a/libc/sysdeps/linux/mips/sysdep.h b/libc/sysdeps/linux/mips/sysdep.h new file mode 100644 index 000000000..56d159073 --- /dev/null +++ b/libc/sysdeps/linux/mips/sysdep.h @@ -0,0 +1,391 @@ +/* Copyright (C) 1992, 1995, 1997, 1999, 2000, 2002, 2003, 2004 +   Free Software Foundation, Inc. +   This file is part of the GNU C Library. +   Contributed by Brendan Kehoe (brendan@zen.org). + +   The GNU C Library is free software; you can redistribute it and/or +   modify it under the terms of the GNU Lesser General Public +   License as published by the Free Software Foundation; either +   version 2.1 of the License, or (at your option) any later version. 
+ +   The GNU C Library is distributed in the hope that it will be useful, +   but WITHOUT ANY WARRANTY; without even the implied warranty of +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU +   Lesser General Public License for more details. + +   You should have received a copy of the GNU Lesser General Public +   License along with the GNU C Library; if not, write to the Free +   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA +   02111-1307 USA.  */ + +#ifndef _LINUX_MIPS_SYSDEP_H +#define _LINUX_MIPS_SYSDEP_H 1 + +#include <sgidefs.h> +#include <common/sysdep.h> + +/* For Linux we can use the system call table in the header file +   /usr/include/asm/unistd.h +   of the kernel.  But these symbols do not follow the SYS_* syntax +   so we have to redefine the `SYS_ify' macro here.  */ + +#undef SYS_ify +#ifdef __STDC__ +# define SYS_ify(syscall_name)	__NR_##syscall_name +#else +# define SYS_ify(syscall_name)	__NR_/**/syscall_name +#endif + +#ifdef __ASSEMBLER__ + +#include <regdef.h> + +#define ENTRY(name) 					\ +  .globl name;						\ +  .align 2;						\ +  .ent name,0;						\ +  name##: + +#undef END +#define	END(function)					\ +		.end	function;			\ +		.size	function,.-function + +#define ret	j ra ; nop + +#undef PSEUDO_END +#define PSEUDO_END(sym) .end sym; .size sym,.-sym + +#define PSEUDO_NOERRNO(name, syscall_name, args)	\ +  .align 2;						\ +  ENTRY(name)						\ +  .set noreorder;					\ +  li v0, SYS_ify(syscall_name);				\ +  syscall + +#undef PSEUDO_END_NOERRNO +#define PSEUDO_END_NOERRNO(sym) .end sym; .size sym,.-sym + +#define ret_NOERRNO ret + +#define PSEUDO_ERRVAL(name, syscall_name, args)		\ +  .align 2;						\ +  ENTRY(name)						\ +  .set noreorder;					\ +  li v0, SYS_ify(syscall_name);				\ +  syscall + +#undef PSEUDO_END_ERRVAL +#define PSEUDO_END_ERRVAL(sym) .end sym; .size sym,.-sym + +#define ret_ERRVAL ret + +#define r0	v0 +#define r1	v1 +/* The mips move insn is d,s.  */ +#define MOVE(x,y)	move y , x + +#if _MIPS_SIM == _ABIO32 +# define L(label) $L ## label +#else +# define L(label) .L ## label +#endif + +/* Note that while it's better structurally, going back to call __syscall_error +   can make things confusing if you're debugging---it looks like it's jumping +   backwards into the previous fn.  */ + +#ifdef __PIC__ +#define PSEUDO(name, syscall_name, args) 		\ +  .align 2;						\ +  99: la t9,__syscall_error;				\ +  jr t9;						\ +  ENTRY(name)						\ +  .set noreorder;					\ +  .cpload t9;						\ +  li v0, SYS_ify(syscall_name);				\ +  syscall;						\ +  .set reorder;						\ +  bne a3, zero, 99b;					\ +L(syse1): +#else +#define PSEUDO(name, syscall_name, args) 		\ +  .set noreorder;					\ +  .align 2;						\ +  99: j __syscall_error;				\ +  nop;							\ +  ENTRY(name)						\ +  .set noreorder;					\ +  li v0, SYS_ify(syscall_name);				\ +  syscall;						\ +  .set reorder;						\ +  bne a3, zero, 99b;					\ +L(syse1): +#endif + +/* We don't want the label for the error handler to be visible in the symbol +   table when we define it here.  */ +#ifdef __PIC__ +# define SYSCALL_ERROR_LABEL 99b +#endif + +#else   /* ! __ASSEMBLER__ */ + +/* Define a macro which expands into the inline wrapper code for a system +   call.  */ +#undef INLINE_SYSCALL +#define INLINE_SYSCALL(name, nr, args...)				
\ +  ({ INTERNAL_SYSCALL_DECL(err);					\ +     long result_var = INTERNAL_SYSCALL (name, err, nr, args);		\ +     if ( INTERNAL_SYSCALL_ERROR_P (result_var, err) )			\ +       {								\ +	 __set_errno (INTERNAL_SYSCALL_ERRNO (result_var, err));	\ +	 result_var = -1L;						\ +       }								\ +     result_var; }) + +#undef INTERNAL_SYSCALL_DECL +#define INTERNAL_SYSCALL_DECL(err) long err + +#undef INTERNAL_SYSCALL_ERROR_P +#define INTERNAL_SYSCALL_ERROR_P(val, err)   ((long) (err)) + +#undef INTERNAL_SYSCALL_ERRNO +#define INTERNAL_SYSCALL_ERRNO(val, err)     (val) + +#undef INTERNAL_SYSCALL +#define INTERNAL_SYSCALL(name, err, nr, args...) \ +	internal_syscall##nr (, "li\t$2, %2\t\t\t# " #name "\n\t",	\ +			      "i" (SYS_ify (name)), err, args) + +#undef INTERNAL_SYSCALL_NCS +#define INTERNAL_SYSCALL_NCS(number, err, nr, args...) \ +	internal_syscall##nr (= number, , "r" (__v0), err, args) +#undef internal_syscall0 +#define internal_syscall0(ncs_init, cs_init, input, err, dummy...)	\ +({									\ +	long _sys_result;						\ +									\ +	{								\ +	register long __v0 __asm__("$2") ncs_init;			\ +	register long __a3 __asm__("$7");				\ +	__asm__ volatile (						\ +	".set\tnoreorder\n\t"						\ +	cs_init								\ +	"syscall\n\t"							\ +	".set reorder"							\ +	: "=r" (__v0), "=r" (__a3)					\ +	: input								\ +	: __SYSCALL_CLOBBERS);						\ +	err = __a3;							\ +	_sys_result = __v0;						\ +	}								\ +	_sys_result;							\ +}) + +#undef internal_syscall1 +#define internal_syscall1(ncs_init, cs_init, input, err, arg1)		\ +({									\ +	long _sys_result;						\ +									\ +	{								\ +	register long __v0 __asm__("$2") ncs_init;			\ +	register long __a0 __asm__("$4") = (long) arg1;			\ +	register long __a3 __asm__("$7");				\ +	__asm__ volatile (						\ +	".set\tnoreorder\n\t"						\ +	cs_init								\ +	"syscall\n\t"							\ +	".set reorder"							\ +	: "=r" (__v0), "=r" (__a3)					\ +	: input, "r" (__a0)						\ +	: __SYSCALL_CLOBBERS);						\ +	err = __a3;							\ +	_sys_result = __v0;						\ +	}								\ +	_sys_result;							\ +}) + +#undef internal_syscall2 +#define internal_syscall2(ncs_init, cs_init, input, err, arg1, arg2)	\ +({									\ +	long _sys_result;						\ +									\ +	{								\ +	register long __v0 __asm__("$2") ncs_init;			\ +	register long __a0 __asm__("$4") = (long) arg1;			\ +	register long __a1 __asm__("$5") = (long) arg2;			\ +	register long __a3 __asm__("$7");				\ +	__asm__ volatile (						\ +	".set\tnoreorder\n\t"						\ +	cs_init								\ +	"syscall\n\t"							\ +	".set\treorder"							\ +	: "=r" (__v0), "=r" (__a3)					\ +	: input, "r" (__a0), "r" (__a1)					\ +	: __SYSCALL_CLOBBERS);						\ +	err = __a3;							\ +	_sys_result = __v0;						\ +	}								\ +	_sys_result;							\ +}) + +#undef internal_syscall3 +#define internal_syscall3(ncs_init, cs_init, input, err, arg1, arg2, arg3)\ +({									\ +	long _sys_result;						\ +									\ +	{								\ +	register long __v0 __asm__("$2") ncs_init;			\ +	register long __a0 __asm__("$4") = (long) arg1;			\ +	register long __a1 __asm__("$5") = (long) arg2;			\ +	register long __a2 __asm__("$6") = (long) arg3;			\ +	register long __a3 __asm__("$7");				\ +	__asm__ volatile (						\ +	".set\tnoreorder\n\t"						\ +	cs_init								\ +	"syscall\n\t"							\ +	".set\treorder"							\ +	: "=r" (__v0), "=r" (__a3)					\ +	: input, "r" (__a0), "r" (__a1), "r" (__a2)			\ +	: __SYSCALL_CLOBBERS);						\ +	err = __a3;							\ +	_sys_result = __v0;						\ +	}								\ +	_sys_result;							\ +}) + +#undef 
internal_syscall4 +#define internal_syscall4(ncs_init, cs_init, input, err, arg1, arg2, arg3, arg4)\ +({									\ +	long _sys_result;						\ +									\ +	{								\ +	register long __v0 __asm__("$2") ncs_init;			\ +	register long __a0 __asm__("$4") = (long) arg1;			\ +	register long __a1 __asm__("$5") = (long) arg2;			\ +	register long __a2 __asm__("$6") = (long) arg3;			\ +	register long __a3 __asm__("$7") = (long) arg4;			\ +	__asm__ volatile (						\ +	".set\tnoreorder\n\t"						\ +	cs_init								\ +	"syscall\n\t"							\ +	".set\treorder"							\ +	: "=r" (__v0), "+r" (__a3)					\ +	: input, "r" (__a0), "r" (__a1), "r" (__a2)			\ +	: __SYSCALL_CLOBBERS);						\ +	err = __a3;							\ +	_sys_result = __v0;						\ +	}								\ +	_sys_result;							\ +}) + +/* We need to use a frame pointer for the functions in which we +   adjust $sp around the syscall, or debug information and unwind +   information will be $sp relative and thus wrong during the syscall.  As +   of GCC 3.4.3, this is sufficient.  */ +#define FORCE_FRAME_POINTER alloca (4) + +#undef internal_syscall5 +#define internal_syscall5(ncs_init, cs_init, input, err, arg1, arg2, arg3, arg4, arg5)\ +({									\ +	long _sys_result;						\ +									\ +	FORCE_FRAME_POINTER;						\ +	{								\ +	register long __v0 __asm__("$2") ncs_init;			\ +	register long __a0 __asm__("$4") = (long) arg1;			\ +	register long __a1 __asm__("$5") = (long) arg2;			\ +	register long __a2 __asm__("$6") = (long) arg3;			\ +	register long __a3 __asm__("$7") = (long) arg4;			\ +	__asm__ volatile (						\ +	".set\tnoreorder\n\t"						\ +	"subu\t$29, 32\n\t"						\ +	"sw\t%6, 16($29)\n\t"						\ +	cs_init								\ +	"syscall\n\t"							\ +	"addiu\t$29, 32\n\t"						\ +	".set\treorder"							\ +	: "=r" (__v0), "+r" (__a3)					\ +	: input, "r" (__a0), "r" (__a1), "r" (__a2),			\ +	  "r" ((long)arg5)						\ +	: __SYSCALL_CLOBBERS);						\ +	err = __a3;							\ +	_sys_result = __v0;						\ +	}								\ +	_sys_result;							\ +}) + +#undef internal_syscall6 +#define internal_syscall6(ncs_init, cs_init, input, err, arg1, arg2, arg3, arg4, arg5, arg6)\ +({									\ +	long _sys_result;						\ +									\ +	FORCE_FRAME_POINTER;						\ +	{								\ +	register long __v0 __asm__("$2") ncs_init;			\ +	register long __a0 __asm__("$4") = (long) arg1;			\ +	register long __a1 __asm__("$5") = (long) arg2;			\ +	register long __a2 __asm__("$6") = (long) arg3;			\ +	register long __a3 __asm__("$7") = (long) arg4;			\ +	__asm__ volatile (						\ +	".set\tnoreorder\n\t"						\ +	"subu\t$29, 32\n\t"						\ +	"sw\t%6, 16($29)\n\t"						\ +	"sw\t%7, 20($29)\n\t"						\ +	cs_init								\ +	"syscall\n\t"							\ +	"addiu\t$29, 32\n\t"						\ +	".set\treorder"							\ +	: "=r" (__v0), "+r" (__a3)					\ +	: input, "r" (__a0), "r" (__a1), "r" (__a2),			\ +	  "r" ((long)arg5), "r" ((long)arg6)				\ +	: __SYSCALL_CLOBBERS);						\ +	err = __a3;							\ +	_sys_result = __v0;						\ +	}								\ +	_sys_result;							\ +}) + +#undef internal_syscall7 +#define internal_syscall7(ncs_init, cs_init, input, err, arg1, arg2, arg3, arg4, arg5, arg6, arg7)\ +({									\ +	long _sys_result;						\ +									\ +	FORCE_FRAME_POINTER;						\ +	{								\ +	register long __v0 __asm__("$2") ncs_init;			\ +	register long __a0 __asm__("$4") = (long) arg1;			\ +	register long __a1 __asm__("$5") = (long) arg2;			\ +	register long __a2 __asm__("$6") = (long) arg3;			\ +	register long __a3 __asm__("$7") = (long) arg4;			\ +	__asm__ volatile (						\ +	".set\tnoreorder\n\t"						\ +	"subu\t$29, 32\n\t"						
\ +	"sw\t%6, 16($29)\n\t"						\ +	"sw\t%7, 20($29)\n\t"						\ +	"sw\t%8, 24($29)\n\t"						\ +	cs_init								\ +	"syscall\n\t"							\ +	"addiu\t$29, 32\n\t"						\ +	".set\treorder"							\ +	: "=r" (__v0), "+r" (__a3)					\ +	: input, "r" (__a0), "r" (__a1), "r" (__a2),			\ +	  "r" ((long)arg5), "r" ((long)arg6), "r" ((long)arg7)		\ +	: __SYSCALL_CLOBBERS);						\ +	err = __a3;							\ +	_sys_result = __v0;						\ +	}								\ +	_sys_result;							\ +}) + +#undef __SYSCALL_CLOBBERS +#define __SYSCALL_CLOBBERS "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", \ +	"$14", "$15", "$24", "$25", "memory" + +/* Pointer mangling is not yet supported for MIPS.  */ +#define PTR_MANGLE(var) (void) (var) +#define PTR_DEMANGLE(var) (void) (var) + +#endif  /* __ASSEMBLER__ */ +#endif /* _LINUX_MIPS_SYSDEP_H */ diff --git a/libc/sysdeps/linux/mips/vfork.S b/libc/sysdeps/linux/mips/vfork.S new file mode 100644 index 000000000..8400df052 --- /dev/null +++ b/libc/sysdeps/linux/mips/vfork.S @@ -0,0 +1,97 @@ +/* Copyright (C) 2005 Free Software Foundation, Inc. +   This file is part of the GNU C Library. + +   The GNU C Library is free software; you can redistribute it and/or +   modify it under the terms of the GNU Lesser General Public +   License as published by the Free Software Foundation; either +   version 2.1 of the License, or (at your option) any later version. + +   The GNU C Library is distributed in the hope that it will be useful, +   but WITHOUT ANY WARRANTY; without even the implied warranty of +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU +   Lesser General Public License for more details. + +   You should have received a copy of the GNU Lesser General Public +   License along with the GNU C Library; if not, write to the Free +   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA +   02111-1307 USA.  */ + +/* vfork() is just a special case of clone().  */ + +#include <sys/asm.h> +#include <sysdep.h> + +#ifndef SAVE_PID +#define SAVE_PID +#endif + +#ifndef RESTORE_PID +#define RESTORE_PID +#endif + + +/* int vfork() */ + +	.text +LOCALSZ= 1 +FRAMESZ= (((NARGSAVE+LOCALSZ)*SZREG)+ALSZ)&ALMASK +GPOFF= FRAMESZ-(1*SZREG) +NESTED(__vfork,FRAMESZ,sp) +#ifdef __PIC__ +	SETUP_GP +#endif +	PTR_SUBU sp, FRAMESZ +	SETUP_GP64 (a5, __vfork) +#ifdef __PIC__ +	SAVE_GP (GPOFF) +#endif +#ifdef PROF +# if (_MIPS_SIM != _ABIO32) +	PTR_S		a5, GPOFF(sp) +# endif +	.set		noat +	move		$1, ra +# if (_MIPS_SIM == _ABIO32) +	subu		sp,sp,8 +# endif +	jal		_mcount +	.set		at +# if (_MIPS_SIM != _ABIO32) +	PTR_L		a5, GPOFF(sp) +# endif +#endif + +	PTR_ADDU	sp, FRAMESZ + +	SAVE_PID + +	li		a0, 0x4112	/* CLONE_VM | CLONE_VFORK | SIGCHLD */ +	move		a1, sp + +	/* Do the system call */ +	li		v0,__NR_clone +	syscall + +	RESTORE_PID + +	bnez		a3,L(error) + +	/* Successful return from the parent or child.  */ +	RESTORE_GP64 +	j		ra +	nop + +	/* Something bad happened -- no child created.  
*/ +L(error): +#ifdef __PIC__ +	PTR_LA		t9, __syscall_error +	RESTORE_GP64 +	jr		t9 +#else +	RESTORE_GP64 +	j		__syscall_error +#endif +	END(__vfork) + +.weak vfork; +	vfork = __vfork diff --git a/libc/sysdeps/linux/sh/Makefile.arch b/libc/sysdeps/linux/sh/Makefile.arch index 31beda111..3e32e1095 100644 --- a/libc/sysdeps/linux/sh/Makefile.arch +++ b/libc/sysdeps/linux/sh/Makefile.arch @@ -7,6 +7,6 @@  #  CSRC := \ -	mmap.c pipe.c __init_brk.c brk.c sbrk.c pread_write.c cacheflush.c +	mmap.c pipe.c __init_brk.c brk.c sbrk.c pread_write.c longjmp.c cacheflush.c -SSRC := setjmp.S __longjmp.S vfork.S clone.S ___fpscr_values.S +SSRC := setjmp.S __longjmp.S ___fpscr_values.S diff --git a/libc/sysdeps/linux/sh/bits/atomic.h b/libc/sysdeps/linux/sh/bits/atomic.h index 6bb7255c5..a099b43a8 100644 --- a/libc/sysdeps/linux/sh/bits/atomic.h +++ b/libc/sysdeps/linux/sh/bits/atomic.h @@ -54,6 +54,10 @@ typedef uintmax_t uatomic_max_t;      Japan. http://lc.linux.or.jp/lc2002/papers/niibe0919h.pdf (in      Japanese). +    Niibe Yutaka, "gUSA: User Space Atomicity with Little Kernel +    Modification", LinuxTag 2003, Rome. +    http://www.semmel.ch/Linuxtag-DVD/talks/170/paper.html (in English). +      B.N. Bershad, D. Redell, and J. Ellis, "Fast Mutual Exclusion for      Uniprocessors",  Proceedings of the Fifth Architectural Support for      Programming Languages and Operating Systems (ASPLOS), pp. 223-233, @@ -65,56 +69,44 @@ typedef uintmax_t uatomic_max_t;        r1:     saved stack pointer  */ -#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ -  ({ __typeof (*(mem)) __result; \ -     __asm__ __volatile__ ("\ +/* Avoid having lots of different versions of compare and exchange, +   by having this one complicated version. Parameters: +      bwl:     b, w or l for 8, 16 and 32 bit versions. +      version: val or bool, depending on whether the result is the +               previous value or a bool indicating whether the transfer +               did happen (note this needs inverting before being +               returned in atomic_compare_and_exchange_bool). +*/ + +#define __arch_compare_and_exchange_n(mem, newval, oldval, bwl, version) \ +  ({ signed long __result; \ +     __asm __volatile ("\  	.align 2\n\  	mova 1f,r0\n\  	nop\n\  	mov r15,r1\n\  	mov #-8,r15\n\ -     0: mov.b @%1,%0\n\ +     0: mov." #bwl " @%1,%0\n\  	cmp/eq %0,%3\n\  	bf 1f\n\ -	mov.b %2,@%1\n\ -     1: mov r1,r15"\ -	: "=&r" (__result) : "r" (mem), "r" (newval), "r" (oldval) \ -	: "r0", "r1", "t", "memory"); \ +	mov." 
#bwl " %2,@%1\n\ +     1: mov r1,r15\n\ +     .ifeqs \"bool\",\"" #version "\"\n\ +        movt %0\n\ +     .endif\n"					\ +	: "=&r" (__result)			\ +	: "r" (mem), "r" (newval), "r" (oldval)	\ +	: "r0", "r1", "t", "memory");		\       __result; }) +#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ +  __arch_compare_and_exchange_n(mem, newval, (int8_t)(oldval), b, val) +  #define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ -  ({ __typeof (*(mem)) __result; \ -     __asm__ __volatile__ ("\ -	.align 2\n\ -	mova 1f,r0\n\ -	nop\n\ -	mov r15,r1\n\ -	mov #-8,r15\n\ -     0: mov.w @%1,%0\n\ -	cmp/eq %0,%3\n\ -	bf 1f\n\ -	mov.w %2,@%1\n\ -     1: mov r1,r15"\ -	: "=&r" (__result) : "r" (mem), "r" (newval), "r" (oldval) \ -	: "r0", "r1", "t", "memory"); \ -     __result; }) +  __arch_compare_and_exchange_n(mem, newval, (int16_t)(oldval), w, val)  #define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ -  ({ __typeof (*(mem)) __result; \ -     __asm__ __volatile__ ("\ -	.align 2\n\ -	mova 1f,r0\n\ -	nop\n\ -	mov r15,r1\n\ -	mov #-8,r15\n\ -     0: mov.l @%1,%0\n\ -	cmp/eq %0,%3\n\ -	bf 1f\n\ -	mov.l %2,@%1\n\ -     1: mov r1,r15"\ -	: "=&r" (__result) : "r" (mem), "r" (newval), "r" (oldval) \ -	: "r0", "r1", "t", "memory"); \ -     __result; }) +  __arch_compare_and_exchange_n(mem, newval, (int32_t)(oldval), l, val)  /* XXX We do not really need 64-bit compare-and-exchange.  At least     not in the moment.  Using it would mean causing portability @@ -122,298 +114,180 @@ typedef uintmax_t uatomic_max_t;     such an operation.  So don't define any code for now.  */  # define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ -  (abort (), (__typeof (*mem)) 0) +  (abort (), 0) + +/* For "bool" routines, return if the exchange did NOT occur */ + +#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \ +  (! __arch_compare_and_exchange_n(mem, newval, (int8_t)(oldval), b, bool)) + +#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \ +  (! __arch_compare_and_exchange_n(mem, newval, (int16_t)(oldval), w, bool)) + +#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \ +  (! __arch_compare_and_exchange_n(mem, newval, (int32_t)(oldval), l, bool)) + +# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \ +  (abort (), 0) + +/* Similar to the above, have one template which can be used in a +   number of places. This version returns both the old and the new +   values of the location. Parameters: +      bwl:     b, w or l for 8, 16 and 32 bit versions. +      oper:    The instruction to perform on the old value. +   Note old is not sign extended, so should be an unsigned long. +*/ + +#define __arch_operate_old_new_n(mem, value, old, new, bwl, oper)	\ +  (void) ({ __asm __volatile ("\ +	.align 2\n\ +	mova 1f,r0\n\ +	mov r15,r1\n\ +	nop\n\ +	mov #-8,r15\n\ +     0: mov." #bwl " @%2,%0\n\ +	mov %0,%1\n\ +	" #oper " %3,%1\n\ +	mov." 
#bwl " %1,@%2\n\ +     1: mov r1,r15"			\ +	: "=&r" (old), "=&r"(new)	\ +	: "r" (mem), "r" (value)	\ +	: "r0", "r1", "memory");	\ +    }) + +#define __arch_exchange_and_add_8_int(mem, value)			\ +  ({ int32_t __value = (value), __new, __old;				\ +    __arch_operate_old_new_n((mem), __value, __old, __new, b, add);	\ +    __old; }) + +#define __arch_exchange_and_add_16_int(mem, value)			\ +  ({ int32_t __value = (value), __new, __old;				\ +    __arch_operate_old_new_n((mem), __value, __old, __new, w, add);	\ +    __old; }) + +#define __arch_exchange_and_add_32_int(mem, value)			\ +  ({ int32_t __value = (value), __new, __old;				\ +    __arch_operate_old_new_n((mem), __value, __old, __new, l, add);	\ +    __old; }) + +#define __arch_exchange_and_add_64_int(mem, value)			\ +  (abort (), 0)  #define atomic_exchange_and_add(mem, value) \ -  ({ __typeof (*(mem)) __result, __tmp, __value = (value); \ -     if (sizeof (*(mem)) == 1) \ -       __asm__ __volatile__ ("\ -	  .align 2\n\ -	  mova 1f,r0\n\ -	  mov r15,r1\n\ -	  mov #-6,r15\n\ -       0: mov.b @%2,%0\n\ -	  add %0,%1\n\ -	  mov.b %1,@%2\n\ -       1: mov r1,r15"\ -	: "=&r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \ -	: "r0", "r1", "memory"); \ -     else if (sizeof (*(mem)) == 2) \ -       __asm__ __volatile__ ("\ -	  .align 2\n\ -	  mova 1f,r0\n\ -	  mov r15,r1\n\ -	  mov #-6,r15\n\ -       0: mov.w @%2,%0\n\ -	  add %0,%1\n\ -	  mov.w %1,@%2\n\ -       1: mov r1,r15"\ -	: "=&r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \ -	: "r0", "r1", "memory"); \ -     else if (sizeof (*(mem)) == 4) \ -       __asm__ __volatile__ ("\ -	  .align 2\n\ -	  mova 1f,r0\n\ -	  mov r15,r1\n\ -	  mov #-6,r15\n\ -       0: mov.l @%2,%0\n\ -	  add %0,%1\n\ -	  mov.l %1,@%2\n\ -       1: mov r1,r15"\ -	: "=&r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \ -	: "r0", "r1", "memory"); \ -     else \ -       { \ -	 __typeof (mem) memp = (mem); \ -	 do \ -	   __result = *memp; \ -	 while (__arch_compare_and_exchange_val_64_acq \ -		 (memp,	__result + __value, __result) == __result); \ -	 (void) __value; \ -       } \ -     __result; }) +  __atomic_val_bysize (__arch_exchange_and_add, int, mem, value) + + +/* Again, another template. We get a slight optimisation when the old value +   does not need to be returned. Parameters: +      bwl:     b, w or l for 8, 16 and 32 bit versions. +      oper:    The instruction to perform on the old value. +*/ + +#define __arch_operate_new_n(mem, value, bwl, oper)	 \ +  ({ int32_t __value = (value), __new; \ +     __asm __volatile ("\ +	.align 2\n\ +	mova 1f,r0\n\ +	mov r15,r1\n\ +	mov #-6,r15\n\ +     0: mov." #bwl " @%1,%0\n\ +	" #oper " %2,%0\n\ +	mov." 
#bwl " %0,@%1\n\ +     1: mov r1,r15"			\ +	: "=&r" (__new)			\ +	: "r" (mem), "r" (__value)	\ +	: "r0", "r1", "memory");	\ +     __new;				\ +  }) + +#define __arch_add_8_int(mem, value)		\ +  __arch_operate_new_n(mem, value, b, add) + +#define __arch_add_16_int(mem, value)		\ +  __arch_operate_new_n(mem, value, w, add) + +#define __arch_add_32_int(mem, value)		\ +  __arch_operate_new_n(mem, value, l, add) + +#define __arch_add_64_int(mem, value)		\ +  (abort (), 0)  #define atomic_add(mem, value) \ -  (void) ({ __typeof (*(mem)) __tmp, __value = (value); \ -	    if (sizeof (*(mem)) == 1) \ -	      __asm__ __volatile__ ("\ -		.align 2\n\ -		mova 1f,r0\n\ -		mov r15,r1\n\ -		mov #-6,r15\n\ -	     0: mov.b @%1,r2\n\ -		add r2,%0\n\ -		mov.b %0,@%1\n\ -	     1: mov r1,r15"\ -		: "=&r" (__tmp) : "r" (mem), "0" (__value) \ -		: "r0", "r1", "r2", "memory"); \ -	    else if (sizeof (*(mem)) == 2) \ -	      __asm__ __volatile__ ("\ -		.align 2\n\ -		mova 1f,r0\n\ -		mov r15,r1\n\ -		mov #-6,r15\n\ -	     0: mov.w @%1,r2\n\ -		add r2,%0\n\ -		mov.w %0,@%1\n\ -	     1: mov r1,r15"\ -		: "=&r" (__tmp) : "r" (mem), "0" (__value) \ -		: "r0", "r1", "r2", "memory"); \ -	    else if (sizeof (*(mem)) == 4) \ -	      __asm__ __volatile__ ("\ -		.align 2\n\ -		mova 1f,r0\n\ -		mov r15,r1\n\ -		mov #-6,r15\n\ -	     0: mov.l @%1,r2\n\ -		add r2,%0\n\ -		mov.l %0,@%1\n\ -	     1: mov r1,r15"\ -		: "=&r" (__tmp) : "r" (mem), "0" (__value) \ -		: "r0", "r1", "r2", "memory"); \ -	    else \ -	      { \ -		__typeof (*(mem)) oldval; \ -		__typeof (mem) memp = (mem); \ -		do \ -		  oldval = *memp; \ -		while (__arch_compare_and_exchange_val_64_acq \ -			(memp, oldval + __value, oldval) == oldval); \ -		(void) __value; \ -	      } \ -	    }) +  ((void) __atomic_val_bysize (__arch_add, int, mem, value)) + + +#define __arch_add_negative_8_int(mem, value)		\ +  (__arch_operate_new_n(mem, value, b, add) < 0) + +#define __arch_add_negative_16_int(mem, value)		\ +  (__arch_operate_new_n(mem, value, w, add) < 0) + +#define __arch_add_negative_32_int(mem, value)		\ +  (__arch_operate_new_n(mem, value, l, add) < 0) + +#define __arch_add_negative_64_int(mem, value)		\ +  (abort (), 0)  #define atomic_add_negative(mem, value) \ -  ({ unsigned char __result; \ -     __typeof (*(mem)) __tmp, __value = (value); \ -     if (sizeof (*(mem)) == 1) \ -       __asm__ __volatile__ ("\ -	  .align 2\n\ -	  mova 1f,r0\n\ -	  mov r15,r1\n\ -	  mov #-6,r15\n\ -       0: mov.b @%2,r2\n\ -	  add r2,%1\n\ -	  mov.b %1,@%2\n\ -       1: mov r1,r15\n\ -	  shal %1\n\ -	  movt %0"\ -	: "=r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \ -	: "r0", "r1", "r2", "t", "memory"); \ -     else if (sizeof (*(mem)) == 2) \ -       __asm__ __volatile__ ("\ -	  .align 2\n\ -	  mova 1f,r0\n\ -	  mov r15,r1\n\ -	  mov #-6,r15\n\ -       0: mov.w @%2,r2\n\ -	  add r2,%1\n\ -	  mov.w %1,@%2\n\ -       1: mov r1,r15\n\ -	  shal %1\n\ -	  movt %0"\ -	: "=r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \ -	: "r0", "r1", "r2", "t", "memory"); \ -     else if (sizeof (*(mem)) == 4) \ -       __asm__ __volatile__ ("\ -	  .align 2\n\ -	  mova 1f,r0\n\ -	  mov r15,r1\n\ -	  mov #-6,r15\n\ -       0: mov.l @%2,r2\n\ -	  add r2,%1\n\ -	  mov.l %1,@%2\n\ -       1: mov r1,r15\n\ -	  shal %1\n\ -	  movt %0"\ -	: "=r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \ -	: "r0", "r1", "r2", "t", "memory"); \ -     else \ -       abort (); \ -     __result; }) +  __atomic_bool_bysize (__arch_add_negative, int, mem, value) + + +#define 
__arch_add_zero_8_int(mem, value)		\ +  (__arch_operate_new_n(mem, value, b, add) == 0) + +#define __arch_add_zero_16_int(mem, value)		\ +  (__arch_operate_new_n(mem, value, w, add) == 0) + +#define __arch_add_zero_32_int(mem, value)		\ +  (__arch_operate_new_n(mem, value, l, add) == 0) + +#define __arch_add_zero_64_int(mem, value)		\ +  (abort (), 0)  #define atomic_add_zero(mem, value) \ -  ({ unsigned char __result; \ -     __typeof (*(mem)) __tmp, __value = (value); \ -     if (sizeof (*(mem)) == 1) \ -       __asm__ __volatile__ ("\ -	  .align 2\n\ -	  mova 1f,r0\n\ -	  mov r15,r1\n\ -	  mov #-6,r15\n\ -       0: mov.b @%2,r2\n\ -	  add r2,%1\n\ -	  mov.b %1,@%2\n\ -       1: mov r1,r15\n\ -	  tst %1,%1\n\ -	  movt %0"\ -	: "=r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \ -	: "r0", "r1", "r2", "t", "memory"); \ -     else if (sizeof (*(mem)) == 2) \ -       __asm__ __volatile__ ("\ -	  .align 2\n\ -	  mova 1f,r0\n\ -	  mov r15,r1\n\ -	  mov #-6,r15\n\ -       0: mov.w @%2,r2\n\ -	  add r2,%1\n\ -	  mov.w %1,@%2\n\ -       1: mov r1,r15\n\ -	  tst %1,%1\n\ -	  movt %0"\ -	: "=r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \ -	: "r0", "r1", "r2", "t", "memory"); \ -     else if (sizeof (*(mem)) == 4) \ -       __asm__ __volatile__ ("\ -	  .align 2\n\ -	  mova 1f,r0\n\ -	  mov r15,r1\n\ -	  mov #-6,r15\n\ -       0: mov.l @%2,r2\n\ -	  add r2,%1\n\ -	  mov.l %1,@%2\n\ -       1: mov r1,r15\n\ -	  tst %1,%1\n\ -	  movt %0"\ -	: "=r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \ -	: "r0", "r1", "r2", "t", "memory"); \ -     else \ -       abort (); \ -     __result; }) +  __atomic_bool_bysize (__arch_add_zero, int, mem, value) +  #define atomic_increment_and_test(mem) atomic_add_zero((mem), 1)  #define atomic_decrement_and_test(mem) atomic_add_zero((mem), -1) -#define atomic_bit_set(mem, bit) \ -  (void) ({ unsigned int __mask = 1 << (bit); \ -	    if (sizeof (*(mem)) == 1) \ -	      __asm__ __volatile__ ("\ -		.align 2\n\ -		mova 1f,r0\n\ -		mov r15,r1\n\ -		mov #-6,r15\n\ -	     0: mov.b @%0,r2\n\ -		or %1,r2\n\ -		mov.b r2,@%0\n\ -	     1: mov r1,r15"\ -		: : "r" (mem), "r" (__mask) \ -		: "r0", "r1", "r2", "memory"); \ -	    else if (sizeof (*(mem)) == 2) \ -	      __asm__ __volatile__ ("\ -		.align 2\n\ -		mova 1f,r0\n\ -		mov r15,r1\n\ -		mov #-6,r15\n\ -	     0: mov.w @%0,r2\n\ -		or %1,r2\n\ -		mov.w r2,@%0\n\ -	     1: mov r1,r15"\ -		: : "r" (mem), "r" (__mask) \ -		: "r0", "r1", "r2", "memory"); \ -	    else if (sizeof (*(mem)) == 4) \ -	      __asm__ __volatile__ ("\ -		.align 2\n\ -		mova 1f,r0\n\ -		mov r15,r1\n\ -		mov #-6,r15\n\ -	     0: mov.l @%0,r2\n\ -		or %1,r2\n\ -		mov.l r2,@%0\n\ -	     1: mov r1,r15"\ -		: : "r" (mem), "r" (__mask) \ -		: "r0", "r1", "r2", "memory"); \ -	    else \ -	      abort (); \ -	    }) - -#define atomic_bit_test_set(mem, bit) \ -  ({ unsigned int __mask = 1 << (bit); \ -     unsigned int __result = __mask; \ -     if (sizeof (*(mem)) == 1) \ -       __asm__ __volatile__ ("\ -	  .align 2\n\ -	  mova 1f,r0\n\ -	  nop\n\ -	  mov r15,r1\n\ -	  mov #-8,r15\n\ -       0: mov.b @%2,r2\n\ -	  or r2,%1\n\ -	  and r2,%0\n\ -	  mov.b %1,@%2\n\ -       1: mov r1,r15"\ -	: "=&r" (__result), "=&r" (__mask) \ -	: "r" (mem), "0" (__result), "1" (__mask) \ -	: "r0", "r1", "r2", "memory"); \ -     else if (sizeof (*(mem)) == 2) \ -       __asm__ __volatile__ ("\ -	  .align 2\n\ -	  mova 1f,r0\n\ -	  nop\n\ -	  mov r15,r1\n\ -	  mov #-8,r15\n\ -       0: mov.w @%2,r2\n\ -	  or r2,%1\n\ -	  and r2,%0\n\ -	  mov.w %1,@%2\n\ -       
1: mov r1,r15"\ -	: "=&r" (__result), "=&r" (__mask) \ -	: "r" (mem), "0" (__result), "1" (__mask) \ -	: "r0", "r1", "r2", "memory"); \ -     else if (sizeof (*(mem)) == 4) \ -       __asm__ __volatile__ ("\ -	  .align 2\n\ -	  mova 1f,r0\n\ -	  nop\n\ -	  mov r15,r1\n\ -	  mov #-8,r15\n\ -       0: mov.l @%2,r2\n\ -	  or r2,%1\n\ -	  and r2,%0\n\ -	  mov.l %1,@%2\n\ -       1: mov r1,r15"\ -	: "=&r" (__result), "=&r" (__mask) \ -	: "r" (mem), "0" (__result), "1" (__mask) \ -	: "r0", "r1", "r2", "memory"); \ -     else \ -       abort (); \ -     __result; }) + +#define __arch_bit_set_8_int(mem, value)		\ +  __arch_operate_new_n(mem, 1<<(value), b, or) + +#define __arch_bit_set_16_int(mem, value)		\ +  __arch_operate_new_n(mem, 1<<(value), w, or) + +#define __arch_bit_set_32_int(mem, value)		\ +  __arch_operate_new_n(mem, 1<<(value), l, or) + +#define __arch_bit_set_64_int(mem, value)		\ +  (abort (), 0) + +#define __arch_add_64_int(mem, value)			\ +  (abort (), 0) + +#define atomic_bit_set(mem, value) \ +  ((void) __atomic_val_bysize (__arch_bit_set, int, mem, value)) + + +#define __arch_bit_test_set_8_int(mem, value)				\ +  ({ int32_t __value = 1<<(value), __new, __old;			\ +    __arch_operate_old_new_n((mem), __value, __old, __new, b, or);	\ +    __old & __value; }) + +#define __arch_bit_test_set_16_int(mem, value)				\ +  ({ int32_t __value = 1<<(value), __new, __old;			\ +    __arch_operate_old_new_n((mem), __value, __old, __new, w, or);	\ +    __old & __value; }) + +#define __arch_bit_test_set_32_int(mem, value)				\ +  ({ int32_t __value = 1<<(value), __new, __old;			\ +    __arch_operate_old_new_n((mem), __value, __old, __new, l, or);	\ +    __old & __value; }) + +#define __arch_bit_test_set_64_int(mem, value)	\ +  (abort (), 0) + +#define atomic_bit_test_set(mem, value) \ +  __atomic_val_bysize (__arch_bit_test_set, int, mem, value) diff --git a/libc/sysdeps/linux/sh/clone.S b/libc/sysdeps/linux/sh/clone.S index 3d18b6dd0..423a6c2f1 100644 --- a/libc/sysdeps/linux/sh/clone.S +++ b/libc/sysdeps/linux/sh/clone.S @@ -1,4 +1,4 @@ -/* Copyright (C) 1999, 2000 Free Software Foundation, Inc. +/* Copyright (C) 1999, 2000, 2003, 2004, 2007 Free Software Foundation, Inc.     This file is part of the GNU C Library.     The GNU C Library is free software; you can redistribute it and/or @@ -20,93 +20,94 @@     and invokes a function in the right context after its all over.  */  #include <features.h> -#include <sys/syscall.h> -#define _ERRNO_H +#include <asm/unistd.h> +#include <sysdep.h> +#define _ERRNO_H	1  #include <bits/errno.h> -#include <bits/sysnum.h> - - -#ifdef __PIC__ -#define PLTJMP(_x)	_x@PLT -#else -#define PLTJMP(_x)	_x +#ifdef RESET_PID +#include <tcb-offsets.h>  #endif +/* int clone(int (*fn)(void *arg), void *child_stack, int flags, void *arg, +	     pid_t *ptid, void *tls, pid_t *ctid); */ - -/* int clone(int (*fn)(void *arg), void *child_stack, int flags, void *arg); */ - -        .text - -.text -.align 4 -.type	clone,@function -.globl	clone; -clone: +	.text +ENTRY(__clone)  	/* sanity check arguments.  
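For reference, a small hypothetical use of the rewritten SH atomics above (counter is purely illustrative; the operand-size dispatch happens inside __atomic_val_bysize and __atomic_bool_bysize):

static volatile int counter;

static int take_ref(void)
{
	/* Returns the value counter held before the increment. */
	return atomic_exchange_and_add(&counter, 1);
}

static int drop_ref(void)
{
	/* True when the decremented value went negative. */
	return atomic_add_negative(&counter, -1);
}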
*/  	tst	r4, r4 -	bt	0f -	tst	r5, r5 -	bf/s	1f -	 mov	#+__NR_clone, r3 -0:		 -	bra __syscall_error -	 mov	#-EINVAL, r4 - +	bt/s	0f +	 tst	r5, r5 +	bf	1f +0: +	bra	.Lsyscall_error +	 mov	#-EINVAL,r0  1:  	/* insert the args onto the new stack */  	mov.l	r7, @-r5  	/* save the function pointer as the 0th element */  	mov.l	r4, @-r5 -	 +  	/* do the system call */  	mov	r6, r4 -	trapa	#(__SH_SYSCALL_TRAP_BASE + 2) +	mov.l	@r15, r6 +	mov.l	@(8,r15), r7 +	mov.l	@(4,r15), r0 +	mov	#+SYS_ify(clone), r3 +	trapa	#0x15  	mov     r0, r1 -#ifdef __sh2__ -/* 12 arithmetic shifts for the crappy sh2, because shad doesn't exist!	 */ -	shar	r1 -	shar	r1 -	shar	r1 -	shar	r1 -	shar	r1 -	shar	r1 -	shar	r1 -	shar	r1 -	shar	r1 -	shar	r1 -	shar	r1 -	shar	r1 -#else		  	mov	#-12, r2  	shad	r2, r1 -#endif -	not	r1, r1			/* r1=0 means r0 = -1 to -4095 */ -	tst	r1, r1			/* i.e. error in linux */ -	bf/s	2f -	 tst	r0, r0 -        bra __syscall_error -	 mov	r0, r4 - -2: -	bt	3f +	not	r1, r1			// r1=0 means r0 = -1 to -4095 +	tst	r1, r1			// i.e. error in linux +	bf	.Lclone_end +.Lsyscall_error: +	SYSCALL_ERROR_HANDLER +.Lclone_end: +	tst	r0, r0 +	bt	2f +.Lpseudo_end:  	rts  	 nop +2: +	/* terminate the stack frame */ +	mov	#0, r14 +#ifdef RESET_PID +	mov	r4, r0 +	shlr16	r0 +	tst	#1, r0			// CLONE_THREAD = (1 << 16) +	bf/s	4f +	 mov	r4, r0 +	/* new pid */ +	shlr8	r0 +	tst	#1, r0			// CLONE_VM = (1 << 8) +	bf/s	3f +	 mov	#-1, r0 +	mov	#+SYS_ify(getpid), r3 +	trapa	#0x15  3: +	stc	gbr, r1 +	mov.w	.Lpidoff, r2 +	add	r1, r2 +	mov.l	r0, @r2 +	mov.w	.Ltidoff, r2 +	add	r1, r2 +	mov.l	r0, @r2 +4: +#endif  	/* thread starts */  	mov.l	@r15, r1  	jsr	@r1  	 mov.l	@(4,r15), r4  	/* we are done, passing the return value through r0  */ -	mov.l	.L1, r1 -#ifdef __PIC__ +	mov.l	.L3, r1 +#ifdef SHARED  	mov.l	r12, @-r15  	sts.l	pr, @-r15  	mov	r0, r4 -	mova	.LG, r0  /* .LG from syscall_error.S */ +	mova	.LG, r0  	mov.l	.LG, r12  	add	r0, r12 -	mova	.L1, r0 +	mova	.L3, r0  	add	r0, r1  	jsr	@r1  	 nop @@ -118,8 +119,16 @@ clone:  	 mov	r0, r4  #endif  	.align	2 -.L1: -	.long	PLTJMP( HIDDEN_JUMPTARGET(_exit)) -.size clone,.-clone; +.LG: +	.long	_GLOBAL_OFFSET_TABLE_ +.L3: +	.long	PLTJMP(C_SYMBOL_NAME(_exit)) +#ifdef RESET_PID +.Lpidoff: +	.word	PID - TLS_PRE_TCB_SIZE +.Ltidoff: +	.word	TID - TLS_PRE_TCB_SIZE +#endif +PSEUDO_END (__clone) -#include "syscall_error.S" +weak_alias (__clone, clone) diff --git a/libc/sysdeps/linux/sh/longjmp.c b/libc/sysdeps/linux/sh/longjmp.c new file mode 100644 index 000000000..dd0616d8a --- /dev/null +++ b/libc/sysdeps/linux/sh/longjmp.c @@ -0,0 +1,56 @@ +/* Copyright (C) 1991, 92, 94, 95, 97, 98, 2000 Free Software Foundation, Inc. +   Copyright (C) 2001 Hewlett-Packard Australia + + This program is free software; you can redistribute it and/or modify it under + the terms of the GNU Library General Public License as published by the Free + Software Foundation; either version 2 of the License, or (at your option) any + later version. + + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more + details. + + You should have received a copy of the GNU Library General Public License + along with this program; if not, write to the Free Software Foundation, Inc., + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + Derived in part from the Linux-8086 C library, the GNU C Library, and several + other sundry sources.  
Files within this library are copyright by their + respective copyright holders. +*/ + +#include <stddef.h> +#include <setjmp.h> +#include <signal.h> + +libc_hidden_proto(sigprocmask) + +extern int __longjmp(char *env, int val); +libc_hidden_proto(__longjmp) + +extern void _longjmp_unwind (jmp_buf env, int val); + + +/* Set the signal mask to the one specified in ENV, and jump +   to the position specified in ENV, causing the setjmp +   call there to return VAL, or 1 if VAL is 0.  */ +void __libc_siglongjmp (sigjmp_buf env, int val) +{ +  /* Perform any cleanups needed by the frames being unwound.  */ + +  _longjmp_unwind (env, val); + +  if (env[0].__mask_was_saved) +    /* Restore the saved signal mask.  */ +    (void) sigprocmask (SIG_SETMASK, &env[0].__saved_mask, +			  (sigset_t *) NULL); + +  /* Call the machine-dependent function to restore machine state.  */ +  __longjmp ((char *) env[0].__jmpbuf, val ?: 1); +} + +__asm__(".weak longjmp; longjmp = __libc_siglongjmp"); +__asm__(".weak _longjmp; _longjmp = __libc_siglongjmp"); +__asm__(".weak siglongjmp; siglongjmp = __libc_siglongjmp"); +strong_alias(__libc_siglongjmp, __libc_longjmp) diff --git a/libc/sysdeps/linux/sh/pread_write.c b/libc/sysdeps/linux/sh/pread_write.c index 84a28e766..86feb9cce 100644 --- a/libc/sysdeps/linux/sh/pread_write.c +++ b/libc/sysdeps/linux/sh/pread_write.c @@ -18,6 +18,13 @@  #include <stdint.h>  #include <endian.h> +#ifdef __UCLIBC_HAS_THREADS_NATIVE__ +#include <sysdep-cancel.h> +#else +#define SINGLE_THREAD_P 1 +#endif + +  #ifdef __NR_pread64             /* Newer kernels renamed but it's the same.  */  # ifdef __NR_pread  #  error "__NR_pread and __NR_pread64 both defined???" @@ -33,7 +40,15 @@ static __inline__ _syscall6(ssize_t, __syscall_pread, int, fd, void *, buf,  ssize_t __libc_pread(int fd, void *buf, size_t count, off_t offset)  { -	return(__syscall_pread(fd,buf,count,0,__LONG_LONG_PAIR(offset >> 31,offset))); +	if (SINGLE_THREAD_P) +		return(__syscall_pread(fd,buf,count,0,__LONG_LONG_PAIR(offset >> 31,offset))); + +#ifdef __UCLIBC_HAS_THREADS_NATIVE__ +	int oldtype = LIBC_CANCEL_ASYNC (); +	ssize_t result = __syscall_pread(fd,buf,count,0,__LONG_LONG_PAIR(offset >> 31,offset)); +	LIBC_CANCEL_RESET (oldtype); +	return result; +#endif  }  weak_alias(__libc_pread,pread) @@ -43,7 +58,16 @@ ssize_t __libc_pread64(int fd, void *buf, size_t count, off64_t offset)  {  	uint32_t low = offset & 0xffffffff;  	uint32_t high = offset >> 32; -	return(__syscall_pread(fd, buf, count, 0, __LONG_LONG_PAIR (high, low))); + +	if (SINGLE_THREAD_P) +		return __syscall_pread(fd, buf, count, 0, __LONG_LONG_PAIR (high, low)); + +#ifdef __UCLIBC_HAS_THREADS_NATIVE__ +	int oldtype = LIBC_CANCEL_ASYNC (); +	ssize_t result = __syscall_pread(fd, buf, count, 0, __LONG_LONG_PAIR (high, low)); +	LIBC_CANCEL_RESET (oldtype); +	return result; +#endif  }  weak_alias(__libc_pread64,pread64)  # endif /* __UCLIBC_HAS_LFS__  */ @@ -66,7 +90,16 @@ static __inline__ _syscall6(ssize_t, __syscall_pwrite, int, fd, const void *, bu  ssize_t __libc_pwrite(int fd, const void *buf, size_t count, off_t offset)  { -	return(__syscall_pwrite(fd,buf,count,0,__LONG_LONG_PAIR(offset >> 31,offset))); +	if (SINGLE_THREAD_P) +		return __syscall_pwrite(fd,buf,count,0,__LONG_LONG_PAIR(offset >> 31,offset)); + +#ifdef __UCLIBC_HAS_THREADS_NATIVE__ +	int oldtype = LIBC_CANCEL_ASYNC (); +	ssize_t result = __syscall_pwrite(fd,buf,count,0,__LONG_LONG_PAIR(offset >> 31,offset)); +	LIBC_CANCEL_RESET (oldtype); +	return result; +#endif +  }  
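The cancellation wrappers above all share one shape: when SINGLE_THREAD_P holds they take the fast path with no cancellation bookkeeping, otherwise they enable asynchronous cancellation for the duration of the blocking syscall and restore the previous type afterwards. A minimal user-level sketch of that shape, using the public pthread_setcanceltype() interface in place of uClibc's internal LIBC_CANCEL_ASYNC()/LIBC_CANCEL_RESET() macros (the helper name is invented for illustration):

    #include <pthread.h>
    #include <unistd.h>

    /* Sketch of the wrapper shape used by __libc_pread/__libc_pwrite
       above; pthread_setcanceltype() stands in for the internal
       LIBC_CANCEL_ASYNC()/LIBC_CANCEL_RESET() macros.  */
    static ssize_t cancellable_pread(int fd, void *buf, size_t count,
                                     off_t offset)
    {
        int oldtype;
        ssize_t result;

        /* Let the thread be cancelled while blocked in the kernel.  */
        pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
        result = pread(fd, buf, count, offset);
        /* Restore the caller's cancellation type.  */
        pthread_setcanceltype(oldtype, NULL);
        return result;
    }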
weak_alias(__libc_pwrite,pwrite) @@ -76,7 +109,16 @@ ssize_t __libc_pwrite64(int fd, const void *buf, size_t count, off64_t offset)  {  	uint32_t low = offset & 0xffffffff;  	uint32_t high = offset >> 32; -	return(__syscall_pwrite(fd, buf, count, 0, __LONG_LONG_PAIR (high, low))); + +	if (SINGLE_THREAD_P) +		return __syscall_pwrite(fd, buf, count, 0, __LONG_LONG_PAIR (high, low)); + +#ifdef __UCLIBC_HAS_THREADS_NATIVE__ +	int oldtype = LIBC_CANCEL_ASYNC (); +	ssize_t result = __syscall_pwrite(fd, buf, count, 0, __LONG_LONG_PAIR (high, low)); +	LIBC_CANCEL_RESET (oldtype); +	return result; +#endif  }  weak_alias(__libc_pwrite64,pwrite64)  # endif /* __UCLIBC_HAS_LFS__  */ diff --git a/libc/sysdeps/linux/sh/setjmp.S b/libc/sysdeps/linux/sh/setjmp.S index 00475a008..0a81424e1 100644 --- a/libc/sysdeps/linux/sh/setjmp.S +++ b/libc/sysdeps/linux/sh/setjmp.S @@ -77,7 +77,7 @@ __sigsetjmp_intern:  	mov.l	r9, @-r4  	mov.l	r8, @-r4 -#ifdef __PIC__ +#ifdef __HAVE_SHARED__  	mov.l	.LG, r2  	mova	.LG, r0  	add	r0, r2 diff --git a/libc/sysdeps/linux/sh/syscall_error.S b/libc/sysdeps/linux/sh/syscall_error.S index f55dd535a..737950308 100644 --- a/libc/sysdeps/linux/sh/syscall_error.S +++ b/libc/sysdeps/linux/sh/syscall_error.S @@ -3,7 +3,7 @@ __syscall_error:  	/* Call errno_location, store '-r4' in errno and return -1 */  	mov.l	r12, @-r15  	sts.l	pr, @-r15 -#ifdef __PIC__ +#ifdef SHARED  	mova	.LG, r0  	mov.l	.LG, r12  	add	r0, r12 @@ -27,7 +27,7 @@ __syscall_error:  	.align	4 -#ifdef __PIC__ +#ifdef SHARED  1:	.long   __errno_location@GOT  .LG:	.long	_GLOBAL_OFFSET_TABLE_  #else diff --git a/libc/sysdeps/linux/sh/sysdep.h b/libc/sysdeps/linux/sh/sysdep.h index 6f182cd0e..bd6234292 100644 --- a/libc/sysdeps/linux/sh/sysdep.h +++ b/libc/sysdeps/linux/sh/sysdep.h @@ -144,7 +144,7 @@  #define ret_ERRVAL ret -#ifndef PIC +#ifndef __PIC__  # define SYSCALL_ERROR_HANDLER	\  	mov.l 0f,r1; \  	jmp @r1; \ @@ -246,7 +246,7 @@       0: .long _GLOBAL_OFFSET_TABLE_; \       1: .long errno@GOT  # endif	/* _LIBC_REENTRANT */ -#endif	/* PIC */ +#endif	/* __PIC__ */  # ifdef __SH4__  #  define SYSCALL_INST_PAD \ @@ -273,5 +273,24 @@      .align 2;				\   1: .long SYS_ify (syscall_name);	\   2: -  #endif	/* __ASSEMBLER__ */ + +/* Pointer mangling support.  */ +#if defined NOT_IN_libc && defined IS_IN_rtld +/* We cannot use the thread descriptor because in ld.so we use setjmp +   earlier than the descriptor is initialized.  Using a global variable +   is too complicated here since we have no PC-relative addressing mode.  */ +#else +# ifdef __ASSEMBLER__ +#  define PTR_MANGLE(reg, tmp) \ +     stc gbr,tmp; mov.l @(POINTER_GUARD,tmp),tmp; xor tmp,reg +#  define PTR_MANGLE2(reg, tmp)	xor tmp,reg +#  define PTR_DEMANGLE(reg, tmp)	PTR_MANGLE (reg, tmp) +#  define PTR_DEMANGLE2(reg, tmp)	PTR_MANGLE2 (reg, tmp) +# else +#  define PTR_MANGLE(var) \ +     (var) = (void *) ((uintptr_t) (var) ^ THREAD_GET_POINTER_GUARD ()) +#  define PTR_DEMANGLE(var)	PTR_MANGLE (var) +# endif +#endif + diff --git a/libc/sysdeps/linux/sparc/Makefile.arch b/libc/sysdeps/linux/sparc/Makefile.arch index ffae27bc7..8a624205a 100644 --- a/libc/sysdeps/linux/sparc/Makefile.arch +++ b/libc/sysdeps/linux/sparc/Makefile.arch @@ -5,8 +5,15 @@  # Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.  
# -CSRC := brk.c __syscall_error.c qp_ops.c sigaction.c +CSRC := brk.c __syscall_error.c qp_ops.c  SSRC := \ -	__longjmp.S fork.S vfork.S clone.S setjmp.S bsd-setjmp.S bsd-_setjmp.S \ +	__longjmp.S setjmp.S bsd-setjmp.S bsd-_setjmp.S \  	syscall.S urem.S udiv.S umul.S sdiv.S rem.S pipe.S + +ifneq ($(UCLIBC_HAS_THREADS_NATIVE),y) +CSRC += sigaction.c +SSRC += fork.S vfork.S +endif + + diff --git a/libc/sysdeps/linux/sparc/bits/atomic.h b/libc/sysdeps/linux/sparc/bits/atomic.h new file mode 100644 index 000000000..f625eb92a --- /dev/null +++ b/libc/sysdeps/linux/sparc/bits/atomic.h @@ -0,0 +1,329 @@ +/* Atomic operations.  sparc32 version. +   Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc. +   This file is part of the GNU C Library. +   Contributed by Jakub Jelinek <jakub@redhat.com>, 2003. + +   The GNU C Library is free software; you can redistribute it and/or +   modify it under the terms of the GNU Lesser General Public +   License as published by the Free Software Foundation; either +   version 2.1 of the License, or (at your option) any later version. + +   The GNU C Library is distributed in the hope that it will be useful, +   but WITHOUT ANY WARRANTY; without even the implied warranty of +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU +   Lesser General Public License for more details. + +   You should have received a copy of the GNU Lesser General Public +   License along with the GNU C Library; if not, write to the Free +   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA +   02111-1307 USA.  */ + +#ifndef _BITS_ATOMIC_H +#define _BITS_ATOMIC_H	1 + +#include <stdint.h> + +typedef int8_t atomic8_t; +typedef uint8_t uatomic8_t; +typedef int_fast8_t atomic_fast8_t; +typedef uint_fast8_t uatomic_fast8_t; + +typedef int16_t atomic16_t; +typedef uint16_t uatomic16_t; +typedef int_fast16_t atomic_fast16_t; +typedef uint_fast16_t uatomic_fast16_t; + +typedef int32_t atomic32_t; +typedef uint32_t uatomic32_t; +typedef int_fast32_t atomic_fast32_t; +typedef uint_fast32_t uatomic_fast32_t; + +typedef int64_t atomic64_t; +typedef uint64_t uatomic64_t; +typedef int_fast64_t atomic_fast64_t; +typedef uint_fast64_t uatomic_fast64_t; + +typedef intptr_t atomicptr_t; +typedef uintptr_t uatomicptr_t; +typedef intmax_t atomic_max_t; +typedef uintmax_t uatomic_max_t; + + +/* We have no compare and swap, just test and set. +   The following implementation contends on 64 global locks +   per library and assumes no variable will be accessed using atomic.h +   macros from two different libraries.  
*/ + +__make_section_unallocated +  (".gnu.linkonce.b.__sparc32_atomic_locks, \"aw\", %nobits"); + +volatile unsigned char __sparc32_atomic_locks[64] +  __attribute__ ((nocommon, section (".gnu.linkonce.b.__sparc32_atomic_locks" +				     __sec_comment), +		  visibility ("hidden"))); + +#define __sparc32_atomic_do_lock(addr) \ +  do								      \ +    {								      \ +      unsigned int __old_lock;					      \ +      unsigned int __idx = (((long) addr >> 2) ^ ((long) addr >> 12)) \ +			   & 63;				      \ +      do							      \ +	__asm __volatile ("ldstub %1, %0"			      \ +			  : "=r" (__old_lock),			      \ +			    "=m" (__sparc32_atomic_locks[__idx])      \ +			  : "m" (__sparc32_atomic_locks[__idx])	      \ +			  : "memory");				      \ +      while (__old_lock);					      \ +    }								      \ +  while (0) + +#define __sparc32_atomic_do_unlock(addr) \ +  do								      \ +    {								      \ +      __sparc32_atomic_locks[(((long) addr >> 2)		      \ +			      ^ ((long) addr >> 12)) & 63] = 0;	      \ +      __asm __volatile ("" ::: "memory");			      \ +    }								      \ +  while (0) + +#define __sparc32_atomic_do_lock24(addr) \ +  do								      \ +    {								      \ +      unsigned int __old_lock;					      \ +      do							      \ +	__asm __volatile ("ldstub %1, %0"			      \ +			  : "=r" (__old_lock), "=m" (*(addr))	      \ +			  : "m" (*(addr))			      \ +			  : "memory");				      \ +      while (__old_lock);					      \ +    }								      \ +  while (0) + +#define __sparc32_atomic_do_unlock24(addr) \ +  do								      \ +    {								      \ +      *(char *) (addr) = 0;					      \ +      __asm __volatile ("" ::: "memory");			      \ +    }								      \ +  while (0) + + +#ifndef SHARED +# define __v9_compare_and_exchange_val_32_acq(mem, newval, oldval) \ +({									      \ +  register __typeof (*(mem)) __acev_tmp __asm ("%g6");			      \ +  register __typeof (mem) __acev_mem __asm ("%g1") = (mem);		      \ +  register __typeof (*(mem)) __acev_oldval __asm ("%g5");		      \ +  __acev_tmp = (newval);						      \ +  __acev_oldval = (oldval);						      \ +  /* .word 0xcde05005 is cas [%g1], %g5, %g6.  Can't use cas here though,     \ +     because as will then mark the object file as V8+ arch.  */		      \ +  __asm __volatile (".word 0xcde05005"					      \ +		    : "+r" (__acev_tmp), "=m" (*__acev_mem)		      \ +		    : "r" (__acev_oldval), "m" (*__acev_mem),		      \ +		      "r" (__acev_mem) : "memory");			      \ +  __acev_tmp; }) +#endif + +/* The only basic operation needed is compare and exchange.  
*/ +#define __v7_compare_and_exchange_val_acq(mem, newval, oldval) \ +  ({ __typeof (mem) __acev_memp = (mem);			      \ +     __typeof (*mem) __acev_ret;				      \ +     __typeof (*mem) __acev_newval = (newval);			      \ +								      \ +     __sparc32_atomic_do_lock (__acev_memp);			      \ +     __acev_ret = *__acev_memp;					      \ +     if (__acev_ret == (oldval))				      \ +       *__acev_memp = __acev_newval;				      \ +     __sparc32_atomic_do_unlock (__acev_memp);			      \ +     __acev_ret; }) + +#define __v7_compare_and_exchange_bool_acq(mem, newval, oldval) \ +  ({ __typeof (mem) __aceb_memp = (mem);			      \ +     int __aceb_ret;						      \ +     __typeof (*mem) __aceb_newval = (newval);			      \ +								      \ +     __sparc32_atomic_do_lock (__aceb_memp);			      \ +     __aceb_ret = 0;						      \ +     if (*__aceb_memp == (oldval))				      \ +       *__aceb_memp = __aceb_newval;				      \ +     else							      \ +       __aceb_ret = 1;						      \ +     __sparc32_atomic_do_unlock (__aceb_memp);			      \ +     __aceb_ret; }) + +#define __v7_exchange_acq(mem, newval) \ +  ({ __typeof (mem) __acev_memp = (mem);			      \ +     __typeof (*mem) __acev_ret;				      \ +     __typeof (*mem) __acev_newval = (newval);			      \ +								      \ +     __sparc32_atomic_do_lock (__acev_memp);			      \ +     __acev_ret = *__acev_memp;					      \ +     *__acev_memp = __acev_newval;				      \ +     __sparc32_atomic_do_unlock (__acev_memp);			      \ +     __acev_ret; }) + +#define __v7_exchange_and_add(mem, value) \ +  ({ __typeof (mem) __acev_memp = (mem);			      \ +     __typeof (*mem) __acev_ret;				      \ +								      \ +     __sparc32_atomic_do_lock (__acev_memp);			      \ +     __acev_ret = *__acev_memp;					      \ +     *__acev_memp = __acev_ret + (value);			      \ +     __sparc32_atomic_do_unlock (__acev_memp);			      \ +     __acev_ret; }) + +/* Special versions, which guarantee that top 8 bits of all values +   are cleared and use those bits as the ldstub lock.  */ +#define __v7_compare_and_exchange_val_24_acq(mem, newval, oldval) \ +  ({ __typeof (mem) __acev_memp = (mem);			      \ +     __typeof (*mem) __acev_ret;				      \ +     __typeof (*mem) __acev_newval = (newval);			      \ +								      \ +     __sparc32_atomic_do_lock24 (__acev_memp);			      \ +     __acev_ret = *__acev_memp & 0xffffff;			      \ +     if (__acev_ret == (oldval))				      \ +       *__acev_memp = __acev_newval;				      \ +     else							      \ +       __sparc32_atomic_do_unlock24 (__acev_memp);		      \ +     __asm __volatile ("" ::: "memory");			      \ +     __acev_ret; }) + +#define __v7_exchange_24_rel(mem, newval) \ +  ({ __typeof (mem) __acev_memp = (mem);			      \ +     __typeof (*mem) __acev_ret;				      \ +     __typeof (*mem) __acev_newval = (newval);			      \ +								      \ +     __sparc32_atomic_do_lock24 (__acev_memp);			      \ +     __acev_ret = *__acev_memp & 0xffffff;			      \ +     *__acev_memp = __acev_newval;				      \ +     __asm __volatile ("" ::: "memory");			      \ +     __acev_ret; }) + +#ifdef SHARED + +/* When dynamically linked, we assume pre-v9 libraries are only ever +   used on pre-v9 CPU.  
*/ +# define __atomic_is_v9 0 + +# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ +  __v7_compare_and_exchange_val_acq (mem, newval, oldval) + +# define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \ +  __v7_compare_and_exchange_bool_acq (mem, newval, oldval) + +# define atomic_exchange_acq(mem, newval) \ +  __v7_exchange_acq (mem, newval) + +# define atomic_exchange_and_add(mem, value) \ +  __v7_exchange_and_add (mem, value) + +# define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \ +  ({								      \ +     if (sizeof (*mem) != 4)					      \ +       abort ();						      \ +     __v7_compare_and_exchange_val_24_acq (mem, newval, oldval); }) + +# define atomic_exchange_24_rel(mem, newval) \ +  ({								      \ +     if (sizeof (*mem) != 4)					      \ +       abort ();						      \ +     __v7_exchange_24_rel (mem, newval); }) + +#else + + + +/* +   Here's what we'd like to do: + +   In libc.a/libpthread.a etc. we don't know if we'll be run on +   pre-v9 or v9 CPU.  To be interoperable with dynamically linked +   apps on v9 CPUs e.g. with process shared primitives, use cas insn +   on v9 CPUs and ldstub on pre-v9. + +   However, we have no good way to test at run time that I know of, +   so resort to the lowest common denominator (v7 ops) -austinf + */ +#define __atomic_is_v9 0 + +# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ +  ({								      \ +     __typeof (*mem) __acev_wret;				      \ +     if (sizeof (*mem) != 4)					      \ +       abort ();						      \ +     if (__atomic_is_v9)					      \ +       __acev_wret						      \ +	 = __v9_compare_and_exchange_val_32_acq (mem, newval, oldval);\ +     else							      \ +       __acev_wret						      \ +	 = __v7_compare_and_exchange_val_acq (mem, newval, oldval);   \ +     __acev_wret; }) + +# define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \ +  ({								      \ +     int __acev_wret;						      \ +     if (sizeof (*mem) != 4)					      \ +       abort ();						      \ +     if (__atomic_is_v9)					      \ +       {							      \ +	 __typeof (oldval) __acev_woldval = (oldval);		      \ +	 __acev_wret						      \ +	   = __v9_compare_and_exchange_val_32_acq (mem, newval,	      \ +						   __acev_woldval)    \ +	     != __acev_woldval;					      \ +       }							      \ +     else							      \ +       __acev_wret						      \ +	 = __v7_compare_and_exchange_bool_acq (mem, newval, oldval);  \ +     __acev_wret; }) + +# define atomic_exchange_rel(mem, newval) \ +  ({								      \ +     __typeof (*mem) __acev_wret;				      \ +     if (sizeof (*mem) != 4)					      \ +       abort ();						      \ +     if (__atomic_is_v9)					      \ +       {							      \ +	 __typeof (mem) __acev_wmemp = (mem);			      \ +	 __typeof (*(mem)) __acev_wval = (newval);		      \ +	 do							      \ +	   __acev_wret = *__acev_wmemp;				      \ +	 while (__builtin_expect				      \ +		  (__v9_compare_and_exchange_val_32_acq (__acev_wmemp,\ +							 __acev_wval, \ +							 __acev_wret) \ +		   != __acev_wret, 0));				      \ +       }							      \ +     else							      \ +       __acev_wret = __v7_exchange_acq (mem, newval);		      \ +     __acev_wret; }) + +# define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \ +  ({								      \ +     __typeof (*mem) __acev_wret;				      \ +     if (sizeof (*mem) != 4)					      \ +       abort ();						      \ +     if (__atomic_is_v9)					      \ +       __acev_wret						      \ +	 = 
__v9_compare_and_exchange_val_32_acq (mem, newval, oldval);\ +     else							      \ +       __acev_wret						      \ +	 = __v7_compare_and_exchange_val_24_acq (mem, newval, oldval);\ +     __acev_wret; }) + +# define atomic_exchange_24_rel(mem, newval) \ +  ({								      \ +     __typeof (*mem) __acev_w24ret;				      \ +     if (sizeof (*mem) != 4)					      \ +       abort ();						      \ +     if (__atomic_is_v9)					      \ +       __acev_w24ret = atomic_exchange_rel (mem, newval);	      \ +     else							      \ +       __acev_w24ret = __v7_exchange_24_rel (mem, newval);	      \ +     __acev_w24ret; }) + +#endif + +#endif	/* bits/atomic.h */ diff --git a/libc/sysdeps/linux/sparc/bits/uClibc_arch_features.h b/libc/sysdeps/linux/sparc/bits/uClibc_arch_features.h index 1dbfa2b55..2d8cdd78b 100644 --- a/libc/sysdeps/linux/sparc/bits/uClibc_arch_features.h +++ b/libc/sysdeps/linux/sparc/bits/uClibc_arch_features.h @@ -36,7 +36,7 @@  #undef __UCLIBC_HAVE_ASM_GLOBAL_DOT_NAME__  /* define if target supports CFI pseudo ops */ -#undef __UCLIBC_HAVE_ASM_CFI_DIRECTIVES__ +#define __UCLIBC_HAVE_ASM_CFI_DIRECTIVES__  /* define if target supports IEEE signed zero floats */  #define __UCLIBC_HAVE_SIGNED_ZERO__ diff --git a/libc/sysdeps/linux/sparc/clone.S b/libc/sysdeps/linux/sparc/clone.S index 0e41ee0cb..2b6609531 100644 --- a/libc/sysdeps/linux/sparc/clone.S +++ b/libc/sysdeps/linux/sparc/clone.S @@ -1,4 +1,5 @@ -/* Copyright (C) 1996, 1997, 1998, 2000 Free Software Foundation, Inc. +/* Copyright (C) 1996, 1997, 1998, 2000, 2003, 2004, 2007 +   Free Software Foundation, Inc.     This file is part of the GNU C Library.     Contributed by Richard Henderson (rth@tamu.edu). @@ -20,47 +21,87 @@  /* clone() is even more special than fork() as it mucks with stacks     and invokes a function in the right context after its all over.  */ -#include <features.h> +#include <asm/errno.h>  #include <asm/unistd.h> +#include <tcb-offsets.h> +#include <sysdep.h> -/* int clone(int (*fn)(void *arg), void *child_stack, int flags, void *arg); */ +#define CLONE_VM	0x00000100 +#define CLONE_THREAD	0x00010000 -.text -.global clone -.type   clone,%function -.align 4 +/* int clone(int (*fn)(void *arg), void *child_stack, int flags, void *arg, +	     pid_t *ptid, void *tls, pid_t *ctid); */ -clone: +	.text +ENTRY (__clone)  	save	%sp,-96,%sp +	cfi_def_cfa_register(%fp) +	cfi_window_save +	cfi_register(%o7, %i7)  	/* sanity check arguments */ -	tst	%i0 -	be	__error -	orcc	%i1,%g0,%o1 -	be	__error -	mov	%i2,%o0 +	orcc	%i0,%g0,%g2 +	be	.Leinval +	 orcc	%i1,%g0,%o1 +	be	.Leinval +	 mov	%i2,%o0 + +	/* The child_stack is the top of the stack, allocate one +	   whole stack frame from that as this is what the kernel +	   expects.  
*/ +	sub	%o1, 96, %o1 +	mov	%i3, %g3 +	mov	%i2, %g4 + +	/* ptid */ +	mov	%i4,%o2 +	/* tls */ +	mov	%i5,%o3 +	/* ctid */ +	ld	[%fp+92],%o4  	/* Do the system call */  	set	__NR_clone,%g1  	ta	0x10 -	bcs	__error -	tst	%o1 +	bcs	.Lerror +	 tst	%o1  	bne	__thread_start -	nop -	ret -	restore %o0,%g0,%o0 - -__error: -	jmp __syscall_error +	 nop +	jmpl	%i7 + 8, %g0 +	 restore %o0,%g0,%o0 -.size clone,.-clone - -.type __thread_start,%function +.Leinval: +	mov	EINVAL, %o0 +.Lerror: +	call	__errno_location +	 mov	%o0, %i0 +	st	%i0,[%o0] +	jmpl	%i7 + 8, %g0 +	 restore %g0,-1,%o0 +END(__clone) +	.type	__thread_start,@function  __thread_start: -	call	%i0 -	mov	%i3,%o0 -	call	HIDDEN_JUMPTARGET(_exit),0 -	nop +#ifdef RESET_PID +	sethi	%hi(CLONE_THREAD), %l0 +	andcc	%g4, %l0, %g0 +	bne	1f +	 andcc	%g4, CLONE_VM, %g0 +	bne,a	2f +	 mov	-1,%o0 +	set	__NR_getpid,%g1 +	ta	0x10 +2: +	st	%o0,[%g7 + PID] +	st	%o0,[%g7 + TID] +1: +#endif +	mov	%g0, %fp	/* terminate backtrace */ +	call	%g2 +	 mov	%g3,%o0 +	call	exit,0 +	 nop + +	.size	__thread_start, .-__thread_start -.size __thread_start,.-__thread_start +weak_alias (__clone, clone) diff --git a/libc/sysdeps/linux/sparc/sigaction.c b/libc/sysdeps/linux/sparc/sigaction.c index 7140fd3a4..a22ac40af 100644 --- a/libc/sysdeps/linux/sparc/sigaction.c +++ b/libc/sysdeps/linux/sparc/sigaction.c @@ -34,8 +34,7 @@ _syscall5(int, rt_sigaction, int, a, int, b, int, c, int, d, int, e);  static void __rt_sigreturn_stub(void);  static void __sigreturn_stub(void); -libc_hidden_proto(sigaction) -int sigaction(int sig, const struct sigaction *act, struct sigaction *oact) +int __libc_sigaction(int sig, const struct sigaction *act, struct sigaction *oact)  {  	int ret;  	struct sigaction kact, koact; @@ -66,8 +65,10 @@ int sigaction(int sig, const struct sigaction *act, struct sigaction *oact)  	return ret;  } -libc_hidden_def(sigaction) -weak_alias(sigaction,__libc_sigaction) +#ifndef LIBC_SIGACTION +weak_alias(__libc_sigaction,sigaction) +libc_hidden_weak(sigaction) +#endif  static void  __rt_sigreturn_stub(void) diff --git a/libc/sysdeps/linux/sparc/sparcv9/clone.S b/libc/sysdeps/linux/sparc/sparcv9/clone.S new file mode 100644 index 000000000..9d101e239 --- /dev/null +++ b/libc/sysdeps/linux/sparc/sparcv9/clone.S @@ -0,0 +1,102 @@ +/* Copyright (C) 1997, 2000, 2007 Free Software Foundation, Inc. +   This file is part of the GNU C Library. +   Contributed by Richard Henderson (rth@tamu.edu). + +   The GNU C Library is free software; you can redistribute it and/or +   modify it under the terms of the GNU Lesser General Public +   License as published by the Free Software Foundation; either +   version 2.1 of the License, or (at your option) any later version. + +   The GNU C Library is distributed in the hope that it will be useful, +   but WITHOUT ANY WARRANTY; without even the implied warranty of +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU +   Lesser General Public License for more details. + +   You should have received a copy of the GNU Lesser General Public +   License along with the GNU C Library; if not, write to the Free +   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA +   02111-1307 USA.  */ + +/* clone() is even more special than fork() as it mucks with stacks +   and invokes a function in the right context after its all over.  
*/ + +#include <asm/errno.h> +#include <asm/unistd.h> +#include <tcb-offsets.h> +#include <sysdep.h> + +#define CLONE_VM	0x00000100 +#define CLONE_THREAD	0x00010000 + +/* int clone(int (*fn)(void *arg), void *child_stack, int flags, void *arg, +	     pid_t *ptid, void *tls, pid_t *ctid); */ + +	.register	%g2,#scratch +	.register	%g3,#scratch + +	.text + +ENTRY (__clone) +	save	%sp, -192, %sp +	cfi_def_cfa_register(%fp) +	cfi_window_save +	cfi_register(%o7, %i7) + +	/* sanity check arguments */ +	brz,pn	%i0, 99f		/* fn non-NULL? */ +	 mov	%i0, %g2 +	brz,pn	%i1, 99f		/* child_stack non-NULL? */ +	 mov	%i2, %o0		/* clone flags */ + +	/* The child_stack is the top of the stack, allocate one +	   whole stack frame from that as this is what the kernel +	   expects.  Also, subtract STACK_BIAS.  */ +	sub	%i1, 192 + 0x7ff, %o1 +	mov	%i3, %g3 +	mov	%i2, %g4 + +	mov	%i4,%o2			/* PTID */ +	mov	%i5,%o3			/* TLS */ +	ldx	[%fp+0x7ff+176],%o4	/* CTID */ + +	/* Do the system call */ +	set	__NR_clone, %g1 +	ta	0x6d +	bcs,pn	%xcc, 98f +	 nop +	brnz,pn	%o1, __thread_start +	 nop +	jmpl	%i7 + 8, %g0 +	 restore %o0, %g0, %o0 +99:	mov	EINVAL, %o0 +98:	call	HIDDEN_JUMPTARGET(__errno_location) +	 mov	%o0, %i0 +	st	%i0, [%o0] +	jmpl	%i7 + 8, %g0 +	 restore %g0,-1,%o0 +END(__clone) + +	.type __thread_start,@function +__thread_start: +#ifdef RESET_PID +	sethi	%hi(CLONE_THREAD), %l0 +	andcc	%g4, %l0, %g0 +	bne,pt	%icc, 1f +	 andcc	%g4, CLONE_VM, %g0 +	bne,a,pn %icc, 2f +	 mov	-1,%o0 +	set	__NR_getpid,%g1 +	ta	0x6d +2:	st	%o0,[%g7 + PID] +	st	%o0,[%g7 + TID] +1: +#endif +	mov	%g0, %fp	/* terminate backtrace */ +	call	%g2 +	 mov	%g3,%o0 +	call	HIDDEN_JUMPTARGET(_exit),0 +	 nop + +	.size	__thread_start, .-__thread_start + +weak_alias (__clone, clone) diff --git a/libc/sysdeps/linux/sparc/sysdep.h b/libc/sysdeps/linux/sparc/sysdep.h new file mode 100644 index 000000000..cf3e3afd1 --- /dev/null +++ b/libc/sysdeps/linux/sparc/sysdep.h @@ -0,0 +1,69 @@ +#ifndef _LINUX_SPARC_SYSDEP_H +#define _LINUX_SPARC_SYSDEP_H 1 + +#include <common/sysdep.h> + +#undef ENTRY +#undef END + +#ifdef __ASSEMBLER__ + +#define LOADSYSCALL(x) mov __NR_##x, %g1 + +#define ENTRY(name)                 \ +    .align 4;                       \ +    .global C_SYMBOL_NAME(name);    \ +    .type   name, @function;        \ +C_LABEL(name)                       \ +    cfi_startproc; + +#define END(name)                   \ +    cfi_endproc;                    \ +    .size name, . - name + +#define LOC(name) .L##name + +	/* If the offset to __syscall_error fits into a signed 22-bit +	 * immediate branch offset, the linker will relax the call into +	 * a normal branch. +	 */ +#undef PSEUDO +#undef PSEUDO_END +#undef PSEUDO_NOERRNO +#undef PSEUDO_ERRVAL + +#define PSEUDO(name, syscall_name, args)	\ +	.text;					\ +	.globl		__syscall_error;	\ +ENTRY(name);					\ +	LOADSYSCALL(syscall_name);		\ +	ta		0x10;			\ +	bcc		1f;			\ +	 mov		%o7, %g1;		\ +	call		__syscall_error;	\ +	 mov		%g1, %o7;		\ +1: + +#define PSEUDO_NOERRNO(name, syscall_name, args)\ +	.text;					\ +ENTRY(name);					\ +	LOADSYSCALL(syscall_name);		\ +	ta		0x10; + +#define PSEUDO_ERRVAL(name, syscall_name, args)	\ +	.text;					\ +ENTRY(name);					\ +	LOADSYSCALL(syscall_name);		\ +	ta		0x10; + +#define PSEUDO_END(name)			\ +	END(name) + + +#endif /* __ASSEMBLER__ */ + +/* Pointer mangling is not yet supported for SPARC.  
*/ +#define PTR_MANGLE(var) (void) (var) +#define PTR_DEMANGLE(var) (void) (var) + +#endif diff --git a/libc/sysdeps/linux/x86_64/Makefile.arch b/libc/sysdeps/linux/x86_64/Makefile.arch index 044f97f95..de7ce7285 100644 --- a/libc/sysdeps/linux/x86_64/Makefile.arch +++ b/libc/sysdeps/linux/x86_64/Makefile.arch @@ -5,7 +5,15 @@  # Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.  # -CSRC := brk.c sigaction.c __syscall_error.c mmap.c +CSRC := brk.c __syscall_error.c mmap.c + +ifneq ($(UCLIBC_HAS_THREADS_NATIVE),y) +CSRC += sigaction.c +endif  SSRC := \ -	__longjmp.S vfork.S setjmp.S syscall.S bsd-setjmp.S bsd-_setjmp.S clone.S +	__longjmp.S setjmp.S syscall.S bsd-setjmp.S bsd-_setjmp.S + +ifneq ($(UCLIBC_HAS_THREADS_NATIVE),y) +SSRC += vfork.S clone.S +endif diff --git a/libc/sysdeps/linux/x86_64/bits/uClibc_arch_features.h b/libc/sysdeps/linux/x86_64/bits/uClibc_arch_features.h index 748e544bb..1d966aee4 100644 --- a/libc/sysdeps/linux/x86_64/bits/uClibc_arch_features.h +++ b/libc/sysdeps/linux/x86_64/bits/uClibc_arch_features.h @@ -36,7 +36,7 @@  #undef __UCLIBC_HAVE_ASM_GLOBAL_DOT_NAME__  /* define if target supports CFI pseudo ops */ -#undef __UCLIBC_HAVE_ASM_CFI_DIRECTIVES__ +#define __UCLIBC_HAVE_ASM_CFI_DIRECTIVES__  /* define if target supports IEEE signed zero floats */  #define __UCLIBC_HAVE_SIGNED_ZERO__ diff --git a/libc/sysdeps/linux/x86_64/clone.S b/libc/sysdeps/linux/x86_64/clone.S index dc5eeb0a0..8c66ce547 100644 --- a/libc/sysdeps/linux/x86_64/clone.S +++ b/libc/sysdeps/linux/x86_64/clone.S @@ -109,6 +109,8 @@ clone:  	call	*%rax  	/* Call exit with return value from function call. */  	movq	%rax, %rdi -	call	HIDDEN_JUMPTARGET(_exit) +	movl	$__NR_exit, %eax +	syscall  .size clone,.-clone +weak_alias(clone, __clone) diff --git a/libc/sysdeps/linux/x86_64/sysdep.h b/libc/sysdeps/linux/x86_64/sysdep.h new file mode 100644 index 000000000..09bb9268b --- /dev/null +++ b/libc/sysdeps/linux/x86_64/sysdep.h @@ -0,0 +1,349 @@ +/* Copyright (C) 2001-2005, 2007 Free Software Foundation, Inc. +   This file is part of the GNU C Library. + +   The GNU C Library is free software; you can redistribute it and/or +   modify it under the terms of the GNU Lesser General Public +   License as published by the Free Software Foundation; either +   version 2.1 of the License, or (at your option) any later version. + +   The GNU C Library is distributed in the hope that it will be useful, +   but WITHOUT ANY WARRANTY; without even the implied warranty of +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU +   Lesser General Public License for more details. + +   You should have received a copy of the GNU Lesser General Public +   License along with the GNU C Library; if not, write to the Free +   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA +   02111-1307 USA.  */ + +#ifndef _LINUX_X86_64_SYSDEP_H +#define _LINUX_X86_64_SYSDEP_H 1 + +/* There is some commonality.  */ +#include <sys/syscall.h> +#include <common/sysdep.h> + +#ifdef	__ASSEMBLER__ + +/* Syntactic details of assembler.  */ + +#ifdef HAVE_ELF + +/* ELF uses byte-counts for .align, most others use log2 of count of bytes.  */ +#define ALIGNARG(log2) 1<<log2 +/* For ELF we need the `.type' directive to make shared libs work right.  */ +#define ASM_TYPE_DIRECTIVE(name,typearg) .type name,typearg; +#define ASM_SIZE_DIRECTIVE(name) .size name,.-name; + +/* In ELF C symbols are asm symbols.  
*/ +#undef	NO_UNDERSCORES +#define NO_UNDERSCORES + +#else + +#define ALIGNARG(log2) log2 +#define ASM_TYPE_DIRECTIVE(name,type)	/* Nothing is specified.  */ +#define ASM_SIZE_DIRECTIVE(name)	/* Nothing is specified.  */ + +#endif + + +/* Define an entry point visible from C.  */ +#define	ENTRY(name)							      \ +  ASM_GLOBAL_DIRECTIVE C_SYMBOL_NAME(name);				      \ +  ASM_TYPE_DIRECTIVE (C_SYMBOL_NAME(name),@function)			      \ +  .align ALIGNARG(4);							      \ +  C_LABEL(name)								      \ +  cfi_startproc;							      \ +  CALL_MCOUNT + +#undef	END +#define END(name)							      \ +  cfi_endproc;								      \ +  ASM_SIZE_DIRECTIVE(name) + +/* If compiled for profiling, call `mcount' at the start of each function.  */ +#ifdef	PROF +/* The mcount code relies on a normal frame pointer being on the stack +   to locate our caller, so push one just for its benefit.  */ +#define CALL_MCOUNT                                                          \ +  pushq %rbp;                                                                \ +  cfi_adjust_cfa_offset(8);                                                  \ +  movq %rsp, %rbp;                                                           \ +  cfi_def_cfa_register(%rbp);                                                \ +  call JUMPTARGET(mcount);                                                   \ +  popq %rbp;                                                                 \ +  cfi_def_cfa(rsp,8); +#else +#define CALL_MCOUNT		/* Do nothing.  */ +#endif + +#ifdef	NO_UNDERSCORES +/* Since C identifiers are not normally prefixed with an underscore +   on this system, the asm identifier `syscall_error' intrudes on the +   C name space.  Make sure we use an innocuous name.  */ +#define	syscall_error	__syscall_error +#define mcount		_mcount +#endif + +#define	PSEUDO(name, syscall_name, args)				      \ +lose:									      \ +  jmp JUMPTARGET(syscall_error)						      \ +  .globl syscall_error;							      \ +  ENTRY (name)								      \ +  DO_CALL (syscall_name, args);						      \ +  jb lose + +#undef	PSEUDO_END +#define	PSEUDO_END(name)						      \ +  END (name) + +#undef JUMPTARGET +#ifdef __PIC__ +#define JUMPTARGET(name)	name##@PLT +#else +#define JUMPTARGET(name)	name +#endif + +/* Local label name for asm code. */ +#ifndef L +# ifdef HAVE_ELF +/* ELF-like local names start with `.L'.  */ +#  define L(name)	.L##name +# else +#  define L(name)	name +# endif +#endif + +#endif	/* __ASSEMBLER__ */ + +/* For Linux we can use the system call table in the header file +	/usr/include/asm/unistd.h +   of the kernel.  But these symbols do not follow the SYS_* syntax +   so we have to redefine the `SYS_ify' macro here.  */ +#undef SYS_ify +#define SYS_ify(syscall_name)	__NR_##syscall_name + +/* This is a kludge to make syscalls.list find these under the names +   pread and pwrite, since some kernel headers define those names +   and some define the *64 names for the same system calls.  */ +#if !defined __NR_pread && defined __NR_pread64 +# define __NR_pread __NR_pread64 +#endif +#if !defined __NR_pwrite && defined __NR_pwrite64 +# define __NR_pwrite __NR_pwrite64 +#endif + +/* This is to help the old kernel headers where __NR_semtimedop is not +   available.  */ +#ifndef __NR_semtimedop +# define __NR_semtimedop 220 +#endif + + +#ifdef __ASSEMBLER__ + +/* Linux uses a negative return value to indicate syscall errors, +   unlike most Unices, which use the condition codes' carry flag. 
+ +   Since version 2.1 the return value of a system call might be +   negative even if the call succeeded.  E.g., the `lseek' system call +   might return a large offset.  Therefore we must no longer test +   for < 0, but test for a real error by making sure the value in %eax +   is a real error number.  Linus said he will make sure that no syscall +   returns a value in -1 .. -4095 as a valid result so we can safely +   test with -4095.  */ + +/* We don't want the label for the error handler to be global when we define +   it here.  */ +# ifdef __PIC__ +#  define SYSCALL_ERROR_LABEL 0f +# else +#  define SYSCALL_ERROR_LABEL syscall_error +# endif + +# undef	PSEUDO +# define PSEUDO(name, syscall_name, args)				      \ +  .text;								      \ +  ENTRY (name)								      \ +    DO_CALL (syscall_name, args);					      \ +    cmpq $-4095, %rax;							      \ +    jae SYSCALL_ERROR_LABEL;						      \ +  L(pseudo_end): + +# undef	PSEUDO_END +# define PSEUDO_END(name)						      \ +  SYSCALL_ERROR_HANDLER							      \ +  END (name) + +# undef	PSEUDO_NOERRNO +# define PSEUDO_NOERRNO(name, syscall_name, args) \ +  .text;								      \ +  ENTRY (name)								      \ +    DO_CALL (syscall_name, args) + +# undef	PSEUDO_END_NOERRNO +# define PSEUDO_END_NOERRNO(name) \ +  END (name) + +# define ret_NOERRNO ret + +# undef	PSEUDO_ERRVAL +# define PSEUDO_ERRVAL(name, syscall_name, args) \ +  .text;								      \ +  ENTRY (name)								      \ +    DO_CALL (syscall_name, args);					      \ +    negq %rax + +# undef	PSEUDO_END_ERRVAL +# define PSEUDO_END_ERRVAL(name) \ +  END (name) + +# define ret_ERRVAL ret + +# ifndef __PIC__ +#  define SYSCALL_ERROR_HANDLER	/* Nothing here; code in sysdep.S is used.  */ +# elif defined(RTLD_PRIVATE_ERRNO) +#  define SYSCALL_ERROR_HANDLER			\ +0:						\ +  leaq rtld_errno(%rip), %rcx;			\ +  xorl %edx, %edx;				\ +  subq %rax, %rdx;				\ +  movl %edx, (%rcx);				\ +  orq $-1, %rax;				\ +  jmp L(pseudo_end); +# elif USE___THREAD +#  ifndef NOT_IN_libc +#   define SYSCALL_ERROR_ERRNO __libc_errno +#  else +#   define SYSCALL_ERROR_ERRNO errno +#  endif +#  define SYSCALL_ERROR_HANDLER			\ +0:						\ +  movq SYSCALL_ERROR_ERRNO@GOTTPOFF(%rip), %rcx;\ +  xorl %edx, %edx;				\ +  subq %rax, %rdx;				\ +  movl %edx, %fs:(%rcx);			\ +  orq $-1, %rax;				\ +  jmp L(pseudo_end); +# elif defined _LIBC_REENTRANT +/* Store (- %rax) into errno through the GOT. +   Note that errno occupies only 4 bytes.  */ +#  define SYSCALL_ERROR_HANDLER			\ +0:						\ +  xorl %edx, %edx;				\ +  subq %rax, %rdx;				\ +  pushq %rdx;					\ +  cfi_adjust_cfa_offset(8);			\ +  call __errno_location@PLT;		\ +  popq %rdx;					\ +  cfi_adjust_cfa_offset(-8);			\ +  movl %edx, (%rax);				\ +  orq $-1, %rax;				\ +  jmp L(pseudo_end); + +/* A quick note: it is assumed that the call to `__errno_location' does +   not modify the stack!  */ +# else /* Not _LIBC_REENTRANT.  
*/ +#  define SYSCALL_ERROR_HANDLER			\ +0:movq errno@GOTPCREL(%RIP), %rcx;		\ +  xorl %edx, %edx;				\ +  subq %rax, %rdx;				\ +  movl %edx, (%rcx);				\ +  orq $-1, %rax;				\ +  jmp L(pseudo_end); +# endif	/* __PIC__ */ + +/* The Linux/x86-64 kernel expects the system call parameters in +   registers according to the following table: + +    syscall number	rax +    arg 1		rdi +    arg 2		rsi +    arg 3		rdx +    arg 4		r10 +    arg 5		r8 +    arg 6		r9 + +    The Linux kernel uses and destroys internally these registers: +    return address from +    syscall		rcx +    eflags from syscall	r11 + +    Normal function call, including calls to the system call stub +    functions in the libc, get the first six parameters passed in +    registers and the seventh parameter and later on the stack.  The +    register use is as follows: + +     system call number	in the DO_CALL macro +     arg 1		rdi +     arg 2		rsi +     arg 3		rdx +     arg 4		rcx +     arg 5		r8 +     arg 6		r9 + +    We have to take care that the stack is aligned to 16 bytes.  When +    called the stack is not aligned since the return address has just +    been pushed. + + +    Syscalls of more than 6 arguments are not supported.  */ + +# undef	DO_CALL +# define DO_CALL(syscall_name, args)		\ +    DOARGS_##args				\ +    movl $SYS_ify (syscall_name), %eax;		\ +    syscall; + +# define DOARGS_0 /* nothing */ +# define DOARGS_1 /* nothing */ +# define DOARGS_2 /* nothing */ +# define DOARGS_3 /* nothing */ +# define DOARGS_4 movq %rcx, %r10; +# define DOARGS_5 DOARGS_4 +# define DOARGS_6 DOARGS_5 + +#endif	/* __ASSEMBLER__ */ + + +/* Pointer mangling support.  */ +#if defined NOT_IN_libc && defined IS_IN_rtld +/* We cannot use the thread descriptor because in ld.so we use setjmp +   earlier than the descriptor is initialized.  */ +# ifdef __ASSEMBLER__ +#  define PTR_MANGLE(reg)	xorq __pointer_chk_guard_local(%rip), reg;    \ +				rolq $17, reg +#  define PTR_DEMANGLE(reg)	rorq $17, reg;				      \ +				xorq __pointer_chk_guard_local(%rip), reg +# else +#  define PTR_MANGLE(reg)	__asm__ ("xorq __pointer_chk_guard_local(%%rip), %0\n" \ +				     "rolq $17, %0"			      \ +				     : "=r" (reg) : "0" (reg)) +#  define PTR_DEMANGLE(reg)	__asm__ ("rorq $17, %0\n"			      \ +				     "xorq __pointer_chk_guard_local(%%rip), %0" \ +				     : "=r" (reg) : "0" (reg)) +# endif +#else +# ifdef __ASSEMBLER__ +#  define PTR_MANGLE(reg)	xorq %fs:POINTER_GUARD, reg;		      \ +				rolq $17, reg +#  define PTR_DEMANGLE(reg)	rorq $17, reg;				      \ +				xorq %fs:POINTER_GUARD, reg +# else +#  define PTR_MANGLE(var)	__asm__ ("xorq %%fs:%c2, %0\n"		      \ +				     "rolq $17, %0"			      \ +				     : "=r" (var)			      \ +				     : "0" (var),			      \ +				       "i" (offsetof (tcbhead_t,	      \ +						      pointer_guard))) +#  define PTR_DEMANGLE(var)	__asm__ ("rorq $17, %0\n"			      \ +				     "xorq %%fs:%c2, %0"		      \ +				     : "=r" (var)			      \ +				     : "0" (var),			      \ +				       "i" (offsetof (tcbhead_t,	      \ +						      pointer_guard))) +# endif +#endif + +#endif /* linux/x86_64/sysdep.h */ diff --git a/libc/sysdeps/linux/x86_64/vfork.S b/libc/sysdeps/linux/x86_64/vfork.S index 2dadbbfe0..97c9c5b67 100644 --- a/libc/sysdeps/linux/x86_64/vfork.S +++ b/libc/sysdeps/linux/x86_64/vfork.S @@ -1,4 +1,4 @@ -/* Copyright (C) 2001, 2002, 2004 Free Software Foundation, Inc. +/* Copyright (C) 2001, 2002, 2004, 2008 Free Software Foundation, Inc.     This file is part of the GNU C Library.     
The GNU C Library is free software; you can redistribute it and/or @@ -31,7 +31,7 @@  .text  .global __vfork  .hidden __vfork -.type	__vfork,%function +.type   __vfork,%function  __vfork: @@ -39,6 +39,10 @@ __vfork:  	   is preserved by the syscall and that we're allowed to destroy. */  	popq	%rdi +#ifdef SAVE_PID +	SAVE_PID +#endif +  	/* Stuff the syscall number in RAX and enter into the kernel.  */  	movl	$__NR_vfork, %eax  	syscall @@ -46,6 +50,10 @@ __vfork:  	/* Push back the return PC.  */  	pushq	%rdi +#ifdef RESTORE_PID +	RESTORE_PID +#endif +  	cmpl	$-4095, %eax  	jae __syscall_error		/* Branch forward if it failed.  */
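The sparc32 bits/atomic.h added above cannot assume a compare-and-swap instruction on pre-v9 parts, so every atomic operation is built from ldstub, a byte test-and-set, with each variable guarded by one of 64 hashed global byte locks. A C rendering of the lock-selection hash it uses (the function name is invented for illustration):

    #include <stdint.h>

    /* Lock selection as in __sparc32_atomic_do_lock above: mix two
       slices of the address so that nearby variables spread across
       the 64 single-byte ldstub locks.  */
    static unsigned int sparc32_lock_index(const void *addr)
    {
        uintptr_t a = (uintptr_t) addr;
        return ((a >> 2) ^ (a >> 12)) & 63;
    }

Shifting out the low two bits discards word alignment, and folding in the bits from 12 upward mixes the page number into the index, so objects at the same page offset in different pages do not all collide on one lock.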
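The register table in the new x86_64 sysdep.h is easy to check from C. Below is an illustrative hand-rolled four-argument syscall, mirroring what DO_CALL/DOARGS_4 above expand to (the function name is invented; the explicit register variable performs the %rcx-to-%r10 move the comment describes):

    #include <sys/syscall.h>

    /* A four-argument syscall by hand, as DO_CALL/DOARGS_4 emit it.
       The kernel takes arg 4 in %r10 while the C ABI delivers it in
       %rcx, hence the dedicated register variable.  */
    static long my_syscall4(long nr, long a1, long a2, long a3, long a4)
    {
        long ret;
        register long r10 __asm__("r10") = a4;  /* arg 4: %rcx -> %r10 */
        __asm__ __volatile__ ("syscall"
                              : "=a" (ret)
                              : "a" (nr), "D" (a1), "S" (a2), "d" (a3),
                                "r" (r10)
                              : "rcx", "r11", "memory"); /* kernel clobbers */
        return ret;
    }

A return value in -4095 .. -1 is a negated errno, which is exactly the range the PSEUDO error-handling macros test for with cmpq $-4095.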
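Both new sysdep.h headers define pointer mangling the same way: code pointers saved in writable memory (setjmp buffers and the like) are xor'ed with a per-thread guard value, and on x86_64 additionally rotated by 17 bits, so an attacker who overwrites a jmp_buf cannot plant a usable address; the SH variant omits the rotate and just xors with the word at @(POINTER_GUARD,gbr). A C rendering of the x86_64 variant (the guard constant is a stand-in; the real guard lives in the thread control block):

    #include <stdint.h>

    /* Demonstration of PTR_MANGLE/PTR_DEMANGLE above: xor with a
       secret guard, then rotate left by 17, and the exact inverse
       on the way back.  The guard below is an assumed demo value.  */
    static const uint64_t pointer_guard = 0x2545f4914f6cdd1dULL;

    static uint64_t ptr_mangle(uint64_t p)
    {
        p ^= pointer_guard;                 /* xorq %fs:POINTER_GUARD */
        return (p << 17) | (p >> 47);       /* rolq $17 */
    }

    static uint64_t ptr_demangle(uint64_t p)
    {
        p = (p >> 17) | (p << 47);          /* rorq $17 */
        return p ^ pointer_guard;           /* xorq %fs:POINTER_GUARD */
    }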
