From e428bff6b2825a5081ae2f081471e5e3921a2e9f Mon Sep 17 00:00:00 2001 From: Austin Foxley Date: Thu, 18 Nov 2010 10:20:43 -0800 Subject: Adding quad float gcc intrinsic implementation for SPARC SPARC gcc generates q_xxx intrinsic calls for quad float ("long double") code. The routines were taken from glibc's glibc-2.9/sysdeps/sparc/sparc32/soft-fp/* and ported to uClibc. Signed-off-by: Konrad Eisele Signed-off-by: Austin Foxley --- libc/sysdeps/linux/sparc/Makefile.arch | 9 +- libc/sysdeps/linux/sparc/soft-fp/double.h | 264 +++ libc/sysdeps/linux/sparc/soft-fp/extended.h | 431 +++++++ libc/sysdeps/linux/sparc/soft-fp/longlong.h | 1461 ++++++++++++++++++++++++ libc/sysdeps/linux/sparc/soft-fp/mp_clz_tab.c | 37 + libc/sysdeps/linux/sparc/soft-fp/op-1.h | 302 +++++ libc/sysdeps/linux/sparc/soft-fp/op-2.h | 617 ++++++++++ libc/sysdeps/linux/sparc/soft-fp/op-4.h | 688 +++++++++++ libc/sysdeps/linux/sparc/soft-fp/op-8.h | 111 ++ libc/sysdeps/linux/sparc/soft-fp/op-common.h | 1359 ++++++++++++++++++++++ libc/sysdeps/linux/sparc/soft-fp/q_add.c | 39 + libc/sysdeps/linux/sparc/soft-fp/q_cmp.c | 41 + libc/sysdeps/linux/sparc/soft-fp/q_cmpe.c | 42 + libc/sysdeps/linux/sparc/soft-fp/q_div.c | 39 + libc/sysdeps/linux/sparc/soft-fp/q_dtoq.c | 44 + libc/sysdeps/linux/sparc/soft-fp/q_feq.c | 40 + libc/sysdeps/linux/sparc/soft-fp/q_fge.c | 40 + libc/sysdeps/linux/sparc/soft-fp/q_fgt.c | 40 + libc/sysdeps/linux/sparc/soft-fp/q_fle.c | 40 + libc/sysdeps/linux/sparc/soft-fp/q_flt.c | 40 + libc/sysdeps/linux/sparc/soft-fp/q_fne.c | 40 + libc/sysdeps/linux/sparc/soft-fp/q_itoq.c | 38 + libc/sysdeps/linux/sparc/soft-fp/q_lltoq.c | 38 + libc/sysdeps/linux/sparc/soft-fp/q_mul.c | 39 + libc/sysdeps/linux/sparc/soft-fp/q_neg.c | 47 + libc/sysdeps/linux/sparc/soft-fp/q_qtod.c | 45 + libc/sysdeps/linux/sparc/soft-fp/q_qtoi.c | 38 + libc/sysdeps/linux/sparc/soft-fp/q_qtoll.c | 38 + libc/sysdeps/linux/sparc/soft-fp/q_qtos.c | 45 + libc/sysdeps/linux/sparc/soft-fp/q_qtou.c | 38 + libc/sysdeps/linux/sparc/soft-fp/q_qtoull.c | 38 + libc/sysdeps/linux/sparc/soft-fp/q_sqrt.c | 39 + libc/sysdeps/linux/sparc/soft-fp/q_stoq.c | 43 + libc/sysdeps/linux/sparc/soft-fp/q_sub.c | 39 + libc/sysdeps/linux/sparc/soft-fp/q_ulltoq.c | 38 + libc/sysdeps/linux/sparc/soft-fp/q_util.c | 57 + libc/sysdeps/linux/sparc/soft-fp/q_utoq.c | 38 + libc/sysdeps/linux/sparc/soft-fp/quad.h | 271 +++ libc/sysdeps/linux/sparc/soft-fp/sfp-machine.h | 213 ++++ libc/sysdeps/linux/sparc/soft-fp/single.h | 151 +++ libc/sysdeps/linux/sparc/soft-fp/soft-fp.h | 205 ++++ 41 files changed, 7220 insertions(+), 2 deletions(-) create mode 100644 libc/sysdeps/linux/sparc/soft-fp/double.h create mode 100644 libc/sysdeps/linux/sparc/soft-fp/extended.h create mode 100644 libc/sysdeps/linux/sparc/soft-fp/longlong.h create mode 100644 libc/sysdeps/linux/sparc/soft-fp/mp_clz_tab.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/op-1.h create mode 100644 libc/sysdeps/linux/sparc/soft-fp/op-2.h create mode 100644 libc/sysdeps/linux/sparc/soft-fp/op-4.h create mode 100644 libc/sysdeps/linux/sparc/soft-fp/op-8.h create mode 100644 libc/sysdeps/linux/sparc/soft-fp/op-common.h create mode 100644 libc/sysdeps/linux/sparc/soft-fp/q_add.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/q_cmp.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/q_cmpe.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/q_div.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/q_dtoq.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/q_feq.c create mode 100644 
libc/sysdeps/linux/sparc/soft-fp/q_fge.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/q_fgt.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/q_fle.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/q_flt.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/q_fne.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/q_itoq.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/q_lltoq.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/q_mul.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/q_neg.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/q_qtod.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/q_qtoi.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/q_qtoll.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/q_qtos.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/q_qtou.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/q_qtoull.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/q_sqrt.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/q_stoq.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/q_sub.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/q_ulltoq.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/q_util.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/q_utoq.c create mode 100644 libc/sysdeps/linux/sparc/soft-fp/quad.h create mode 100644 libc/sysdeps/linux/sparc/soft-fp/sfp-machine.h create mode 100644 libc/sysdeps/linux/sparc/soft-fp/single.h create mode 100644 libc/sysdeps/linux/sparc/soft-fp/soft-fp.h (limited to 'libc/sysdeps') diff --git a/libc/sysdeps/linux/sparc/Makefile.arch b/libc/sysdeps/linux/sparc/Makefile.arch index 8a624205a..91c6e85e4 100644 --- a/libc/sysdeps/linux/sparc/Makefile.arch +++ b/libc/sysdeps/linux/sparc/Makefile.arch @@ -5,7 +5,7 @@ # Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball. # -CSRC := brk.c __syscall_error.c qp_ops.c +CSRC := brk.c __syscall_error.c SSRC := \ __longjmp.S setjmp.S bsd-setjmp.S bsd-_setjmp.S \ @@ -16,4 +16,9 @@ CSRC += sigaction.c SSRC += fork.S vfork.S endif - +CSRC += $(foreach f, \ + q_div.c q_fle.c q_mul.c q_qtoll.c q_stoq.c \ + mp_clz_tab.c q_dtoq.c q_flt.c q_neg.c q_qtos.c q_sub.c \ + q_add.c q_feq.c q_fne.c q_qtod.c q_qtou.c q_ulltoq.c \ + q_cmp.c q_fge.c q_itoq.c q_qtoull.c q_util.c \ + q_cmpe.c q_fgt.c q_lltoq.c q_qtoi.c q_sqrt.c q_utoq.c, soft-fp/$(f)) diff --git a/libc/sysdeps/linux/sparc/soft-fp/double.h b/libc/sysdeps/linux/sparc/soft-fp/double.h new file mode 100644 index 000000000..b012d9d51 --- /dev/null +++ b/libc/sysdeps/linux/sparc/soft-fp/double.h @@ -0,0 +1,264 @@ +/* Software floating-point emulation. + Definitions for IEEE Double Precision + Copyright (C) 1997,1998,1999,2006,2007 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Richard Henderson (rth@cygnus.com), + Jakub Jelinek (jj@ultra.linux.cz), + David S. Miller (davem@redhat.com) and + Peter Maydell (pmaydell@chiark.greenend.org.uk). + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + In addition to the permissions in the GNU Lesser General Public + License, the Free Software Foundation gives you unlimited + permission to link the compiled version of this file into + combinations with other programs, and to distribute those + combinations without any restriction coming from the use of this + file. 
(The Lesser General Public License restrictions do apply in + other respects; for example, they cover modification of the file, + and distribution when not linked into a combine executable.) + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, 51 Franklin Street, Fifth Floor, Boston, + MA 02110-1301, USA. */ + +#if _FP_W_TYPE_SIZE < 32 +#error "Here's a nickel kid. Go buy yourself a real computer." +#endif + +#if _FP_W_TYPE_SIZE < 64 +#define _FP_FRACTBITS_D (2 * _FP_W_TYPE_SIZE) +#else +#define _FP_FRACTBITS_D _FP_W_TYPE_SIZE +#endif + +#define _FP_FRACBITS_D 53 +#define _FP_FRACXBITS_D (_FP_FRACTBITS_D - _FP_FRACBITS_D) +#define _FP_WFRACBITS_D (_FP_WORKBITS + _FP_FRACBITS_D) +#define _FP_WFRACXBITS_D (_FP_FRACTBITS_D - _FP_WFRACBITS_D) +#define _FP_EXPBITS_D 11 +#define _FP_EXPBIAS_D 1023 +#define _FP_EXPMAX_D 2047 + +#define _FP_QNANBIT_D \ + ((_FP_W_TYPE)1 << (_FP_FRACBITS_D-2) % _FP_W_TYPE_SIZE) +#define _FP_QNANBIT_SH_D \ + ((_FP_W_TYPE)1 << (_FP_FRACBITS_D-2+_FP_WORKBITS) % _FP_W_TYPE_SIZE) +#define _FP_IMPLBIT_D \ + ((_FP_W_TYPE)1 << (_FP_FRACBITS_D-1) % _FP_W_TYPE_SIZE) +#define _FP_IMPLBIT_SH_D \ + ((_FP_W_TYPE)1 << (_FP_FRACBITS_D-1+_FP_WORKBITS) % _FP_W_TYPE_SIZE) +#define _FP_OVERFLOW_D \ + ((_FP_W_TYPE)1 << _FP_WFRACBITS_D % _FP_W_TYPE_SIZE) + +typedef float DFtype __attribute__((mode(DF))); + +#if _FP_W_TYPE_SIZE < 64 + +union _FP_UNION_D +{ + DFtype flt; + struct { +#if __BYTE_ORDER == __BIG_ENDIAN + unsigned sign : 1; + unsigned exp : _FP_EXPBITS_D; + unsigned frac1 : _FP_FRACBITS_D - (_FP_IMPLBIT_D != 0) - _FP_W_TYPE_SIZE; + unsigned frac0 : _FP_W_TYPE_SIZE; +#else + unsigned frac0 : _FP_W_TYPE_SIZE; + unsigned frac1 : _FP_FRACBITS_D - (_FP_IMPLBIT_D != 0) - _FP_W_TYPE_SIZE; + unsigned exp : _FP_EXPBITS_D; + unsigned sign : 1; +#endif + } bits __attribute__((packed)); +}; + +#define FP_DECL_D(X) _FP_DECL(2,X) +#define FP_UNPACK_RAW_D(X,val) _FP_UNPACK_RAW_2(D,X,val) +#define FP_UNPACK_RAW_DP(X,val) _FP_UNPACK_RAW_2_P(D,X,val) +#define FP_PACK_RAW_D(val,X) _FP_PACK_RAW_2(D,val,X) +#define FP_PACK_RAW_DP(val,X) \ + do { \ + if (!FP_INHIBIT_RESULTS) \ + _FP_PACK_RAW_2_P(D,val,X); \ + } while (0) + +#define FP_UNPACK_D(X,val) \ + do { \ + _FP_UNPACK_RAW_2(D,X,val); \ + _FP_UNPACK_CANONICAL(D,2,X); \ + } while (0) + +#define FP_UNPACK_DP(X,val) \ + do { \ + _FP_UNPACK_RAW_2_P(D,X,val); \ + _FP_UNPACK_CANONICAL(D,2,X); \ + } while (0) + +#define FP_UNPACK_SEMIRAW_D(X,val) \ + do { \ + _FP_UNPACK_RAW_2(D,X,val); \ + _FP_UNPACK_SEMIRAW(D,2,X); \ + } while (0) + +#define FP_UNPACK_SEMIRAW_DP(X,val) \ + do { \ + _FP_UNPACK_RAW_2_P(D,X,val); \ + _FP_UNPACK_SEMIRAW(D,2,X); \ + } while (0) + +#define FP_PACK_D(val,X) \ + do { \ + _FP_PACK_CANONICAL(D,2,X); \ + _FP_PACK_RAW_2(D,val,X); \ + } while (0) + +#define FP_PACK_DP(val,X) \ + do { \ + _FP_PACK_CANONICAL(D,2,X); \ + if (!FP_INHIBIT_RESULTS) \ + _FP_PACK_RAW_2_P(D,val,X); \ + } while (0) + +#define FP_PACK_SEMIRAW_D(val,X) \ + do { \ + _FP_PACK_SEMIRAW(D,2,X); \ + _FP_PACK_RAW_2(D,val,X); \ + } while (0) + +#define FP_PACK_SEMIRAW_DP(val,X) \ + do { \ + _FP_PACK_SEMIRAW(D,2,X); \ + if (!FP_INHIBIT_RESULTS) \ + _FP_PACK_RAW_2_P(D,val,X); \ + } while (0) + +#define 
FP_ISSIGNAN_D(X) _FP_ISSIGNAN(D,2,X) +#define FP_NEG_D(R,X) _FP_NEG(D,2,R,X) +#define FP_ADD_D(R,X,Y) _FP_ADD(D,2,R,X,Y) +#define FP_SUB_D(R,X,Y) _FP_SUB(D,2,R,X,Y) +#define FP_MUL_D(R,X,Y) _FP_MUL(D,2,R,X,Y) +#define FP_DIV_D(R,X,Y) _FP_DIV(D,2,R,X,Y) +#define FP_SQRT_D(R,X) _FP_SQRT(D,2,R,X) +#define _FP_SQRT_MEAT_D(R,S,T,X,Q) _FP_SQRT_MEAT_2(R,S,T,X,Q) + +#define FP_CMP_D(r,X,Y,un) _FP_CMP(D,2,r,X,Y,un) +#define FP_CMP_EQ_D(r,X,Y) _FP_CMP_EQ(D,2,r,X,Y) +#define FP_CMP_UNORD_D(r,X,Y) _FP_CMP_UNORD(D,2,r,X,Y) + +#define FP_TO_INT_D(r,X,rsz,rsg) _FP_TO_INT(D,2,r,X,rsz,rsg) +#define FP_FROM_INT_D(X,r,rs,rt) _FP_FROM_INT(D,2,X,r,rs,rt) + +#define _FP_FRAC_HIGH_D(X) _FP_FRAC_HIGH_2(X) +#define _FP_FRAC_HIGH_RAW_D(X) _FP_FRAC_HIGH_2(X) + +#else + +union _FP_UNION_D +{ + DFtype flt; + struct { +#if __BYTE_ORDER == __BIG_ENDIAN + unsigned sign : 1; + unsigned exp : _FP_EXPBITS_D; + _FP_W_TYPE frac : _FP_FRACBITS_D - (_FP_IMPLBIT_D != 0); +#else + _FP_W_TYPE frac : _FP_FRACBITS_D - (_FP_IMPLBIT_D != 0); + unsigned exp : _FP_EXPBITS_D; + unsigned sign : 1; +#endif + } bits __attribute__((packed)); +}; + +#define FP_DECL_D(X) _FP_DECL(1,X) +#define FP_UNPACK_RAW_D(X,val) _FP_UNPACK_RAW_1(D,X,val) +#define FP_UNPACK_RAW_DP(X,val) _FP_UNPACK_RAW_1_P(D,X,val) +#define FP_PACK_RAW_D(val,X) _FP_PACK_RAW_1(D,val,X) +#define FP_PACK_RAW_DP(val,X) \ + do { \ + if (!FP_INHIBIT_RESULTS) \ + _FP_PACK_RAW_1_P(D,val,X); \ + } while (0) + +#define FP_UNPACK_D(X,val) \ + do { \ + _FP_UNPACK_RAW_1(D,X,val); \ + _FP_UNPACK_CANONICAL(D,1,X); \ + } while (0) + +#define FP_UNPACK_DP(X,val) \ + do { \ + _FP_UNPACK_RAW_1_P(D,X,val); \ + _FP_UNPACK_CANONICAL(D,1,X); \ + } while (0) + +#define FP_UNPACK_SEMIRAW_D(X,val) \ + do { \ + _FP_UNPACK_RAW_2(1,X,val); \ + _FP_UNPACK_SEMIRAW(D,1,X); \ + } while (0) + +#define FP_UNPACK_SEMIRAW_DP(X,val) \ + do { \ + _FP_UNPACK_RAW_2_P(1,X,val); \ + _FP_UNPACK_SEMIRAW(D,1,X); \ + } while (0) + +#define FP_PACK_D(val,X) \ + do { \ + _FP_PACK_CANONICAL(D,1,X); \ + _FP_PACK_RAW_1(D,val,X); \ + } while (0) + +#define FP_PACK_DP(val,X) \ + do { \ + _FP_PACK_CANONICAL(D,1,X); \ + if (!FP_INHIBIT_RESULTS) \ + _FP_PACK_RAW_1_P(D,val,X); \ + } while (0) + +#define FP_PACK_SEMIRAW_D(val,X) \ + do { \ + _FP_PACK_SEMIRAW(D,1,X); \ + _FP_PACK_RAW_1(D,val,X); \ + } while (0) + +#define FP_PACK_SEMIRAW_DP(val,X) \ + do { \ + _FP_PACK_SEMIRAW(D,1,X); \ + if (!FP_INHIBIT_RESULTS) \ + _FP_PACK_RAW_1_P(D,val,X); \ + } while (0) + +#define FP_ISSIGNAN_D(X) _FP_ISSIGNAN(D,1,X) +#define FP_NEG_D(R,X) _FP_NEG(D,1,R,X) +#define FP_ADD_D(R,X,Y) _FP_ADD(D,1,R,X,Y) +#define FP_SUB_D(R,X,Y) _FP_SUB(D,1,R,X,Y) +#define FP_MUL_D(R,X,Y) _FP_MUL(D,1,R,X,Y) +#define FP_DIV_D(R,X,Y) _FP_DIV(D,1,R,X,Y) +#define FP_SQRT_D(R,X) _FP_SQRT(D,1,R,X) +#define _FP_SQRT_MEAT_D(R,S,T,X,Q) _FP_SQRT_MEAT_1(R,S,T,X,Q) + +/* The implementation of _FP_MUL_D and _FP_DIV_D should be chosen by + the target machine. 
*/ + +#define FP_CMP_D(r,X,Y,un) _FP_CMP(D,1,r,X,Y,un) +#define FP_CMP_EQ_D(r,X,Y) _FP_CMP_EQ(D,1,r,X,Y) +#define FP_CMP_UNORD_D(r,X,Y) _FP_CMP_UNORD(D,1,r,X,Y) + +#define FP_TO_INT_D(r,X,rsz,rsg) _FP_TO_INT(D,1,r,X,rsz,rsg) +#define FP_FROM_INT_D(X,r,rs,rt) _FP_FROM_INT(D,1,X,r,rs,rt) + +#define _FP_FRAC_HIGH_D(X) _FP_FRAC_HIGH_1(X) +#define _FP_FRAC_HIGH_RAW_D(X) _FP_FRAC_HIGH_1(X) + +#endif /* W_TYPE_SIZE < 64 */ diff --git a/libc/sysdeps/linux/sparc/soft-fp/extended.h b/libc/sysdeps/linux/sparc/soft-fp/extended.h new file mode 100644 index 000000000..e5f16debe --- /dev/null +++ b/libc/sysdeps/linux/sparc/soft-fp/extended.h @@ -0,0 +1,431 @@ +/* Software floating-point emulation. + Definitions for IEEE Extended Precision. + Copyright (C) 1999,2006,2007 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Jakub Jelinek (jj@ultra.linux.cz). + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + In addition to the permissions in the GNU Lesser General Public + License, the Free Software Foundation gives you unlimited + permission to link the compiled version of this file into + combinations with other programs, and to distribute those + combinations without any restriction coming from the use of this + file. (The Lesser General Public License restrictions do apply in + other respects; for example, they cover modification of the file, + and distribution when not linked into a combine executable.) + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, 51 Franklin Street, Fifth Floor, Boston, + MA 02110-1301, USA. */ + +#if _FP_W_TYPE_SIZE < 32 +#error "Here's a nickel, kid. Go buy yourself a real computer." 
+#endif + +#if _FP_W_TYPE_SIZE < 64 +#define _FP_FRACTBITS_E (4*_FP_W_TYPE_SIZE) +#else +#define _FP_FRACTBITS_E (2*_FP_W_TYPE_SIZE) +#endif + +#define _FP_FRACBITS_E 64 +#define _FP_FRACXBITS_E (_FP_FRACTBITS_E - _FP_FRACBITS_E) +#define _FP_WFRACBITS_E (_FP_WORKBITS + _FP_FRACBITS_E) +#define _FP_WFRACXBITS_E (_FP_FRACTBITS_E - _FP_WFRACBITS_E) +#define _FP_EXPBITS_E 15 +#define _FP_EXPBIAS_E 16383 +#define _FP_EXPMAX_E 32767 + +#define _FP_QNANBIT_E \ + ((_FP_W_TYPE)1 << (_FP_FRACBITS_E-2) % _FP_W_TYPE_SIZE) +#define _FP_QNANBIT_SH_E \ + ((_FP_W_TYPE)1 << (_FP_FRACBITS_E-2+_FP_WORKBITS) % _FP_W_TYPE_SIZE) +#define _FP_IMPLBIT_E \ + ((_FP_W_TYPE)1 << (_FP_FRACBITS_E-1) % _FP_W_TYPE_SIZE) +#define _FP_IMPLBIT_SH_E \ + ((_FP_W_TYPE)1 << (_FP_FRACBITS_E-1+_FP_WORKBITS) % _FP_W_TYPE_SIZE) +#define _FP_OVERFLOW_E \ + ((_FP_W_TYPE)1 << (_FP_WFRACBITS_E % _FP_W_TYPE_SIZE)) + +typedef float XFtype __attribute__((mode(XF))); + +#if _FP_W_TYPE_SIZE < 64 + +union _FP_UNION_E +{ + XFtype flt; + struct + { +#if __BYTE_ORDER == __BIG_ENDIAN + unsigned long pad1 : _FP_W_TYPE_SIZE; + unsigned long pad2 : (_FP_W_TYPE_SIZE - 1 - _FP_EXPBITS_E); + unsigned long sign : 1; + unsigned long exp : _FP_EXPBITS_E; + unsigned long frac1 : _FP_W_TYPE_SIZE; + unsigned long frac0 : _FP_W_TYPE_SIZE; +#else + unsigned long frac0 : _FP_W_TYPE_SIZE; + unsigned long frac1 : _FP_W_TYPE_SIZE; + unsigned exp : _FP_EXPBITS_E; + unsigned sign : 1; +#endif /* not bigendian */ + } bits __attribute__((packed)); +}; + + +#define FP_DECL_E(X) _FP_DECL(4,X) + +#define FP_UNPACK_RAW_E(X, val) \ + do { \ + union _FP_UNION_E _flo; _flo.flt = (val); \ + \ + X##_f[2] = 0; X##_f[3] = 0; \ + X##_f[0] = _flo.bits.frac0; \ + X##_f[1] = _flo.bits.frac1; \ + X##_e = _flo.bits.exp; \ + X##_s = _flo.bits.sign; \ + } while (0) + +#define FP_UNPACK_RAW_EP(X, val) \ + do { \ + union _FP_UNION_E *_flo = \ + (union _FP_UNION_E *)(val); \ + \ + X##_f[2] = 0; X##_f[3] = 0; \ + X##_f[0] = _flo->bits.frac0; \ + X##_f[1] = _flo->bits.frac1; \ + X##_e = _flo->bits.exp; \ + X##_s = _flo->bits.sign; \ + } while (0) + +#define FP_PACK_RAW_E(val, X) \ + do { \ + union _FP_UNION_E _flo; \ + \ + if (X##_e) X##_f[1] |= _FP_IMPLBIT_E; \ + else X##_f[1] &= ~(_FP_IMPLBIT_E); \ + _flo.bits.frac0 = X##_f[0]; \ + _flo.bits.frac1 = X##_f[1]; \ + _flo.bits.exp = X##_e; \ + _flo.bits.sign = X##_s; \ + \ + (val) = _flo.flt; \ + } while (0) + +#define FP_PACK_RAW_EP(val, X) \ + do { \ + if (!FP_INHIBIT_RESULTS) \ + { \ + union _FP_UNION_E *_flo = \ + (union _FP_UNION_E *)(val); \ + \ + if (X##_e) X##_f[1] |= _FP_IMPLBIT_E; \ + else X##_f[1] &= ~(_FP_IMPLBIT_E); \ + _flo->bits.frac0 = X##_f[0]; \ + _flo->bits.frac1 = X##_f[1]; \ + _flo->bits.exp = X##_e; \ + _flo->bits.sign = X##_s; \ + } \ + } while (0) + +#define FP_UNPACK_E(X,val) \ + do { \ + FP_UNPACK_RAW_E(X,val); \ + _FP_UNPACK_CANONICAL(E,4,X); \ + } while (0) + +#define FP_UNPACK_EP(X,val) \ + do { \ + FP_UNPACK_RAW_EP(X,val); \ + _FP_UNPACK_CANONICAL(E,4,X); \ + } while (0) + +#define FP_UNPACK_SEMIRAW_E(X,val) \ + do { \ + FP_UNPACK_RAW_E(X,val); \ + _FP_UNPACK_SEMIRAW(E,4,X); \ + } while (0) + +#define FP_UNPACK_SEMIRAW_EP(X,val) \ + do { \ + FP_UNPACK_RAW_EP(X,val); \ + _FP_UNPACK_SEMIRAW(E,4,X); \ + } while (0) + +#define FP_PACK_E(val,X) \ + do { \ + _FP_PACK_CANONICAL(E,4,X); \ + FP_PACK_RAW_E(val,X); \ + } while (0) + +#define FP_PACK_EP(val,X) \ + do { \ + _FP_PACK_CANONICAL(E,4,X); \ + FP_PACK_RAW_EP(val,X); \ + } while (0) + +#define FP_PACK_SEMIRAW_E(val,X) \ + do { \ + _FP_PACK_SEMIRAW(E,4,X); \ + 
FP_PACK_RAW_E(val,X); \ + } while (0) + +#define FP_PACK_SEMIRAW_EP(val,X) \ + do { \ + _FP_PACK_SEMIRAW(E,4,X); \ + FP_PACK_RAW_EP(val,X); \ + } while (0) + +#define FP_ISSIGNAN_E(X) _FP_ISSIGNAN(E,4,X) +#define FP_NEG_E(R,X) _FP_NEG(E,4,R,X) +#define FP_ADD_E(R,X,Y) _FP_ADD(E,4,R,X,Y) +#define FP_SUB_E(R,X,Y) _FP_SUB(E,4,R,X,Y) +#define FP_MUL_E(R,X,Y) _FP_MUL(E,4,R,X,Y) +#define FP_DIV_E(R,X,Y) _FP_DIV(E,4,R,X,Y) +#define FP_SQRT_E(R,X) _FP_SQRT(E,4,R,X) + +/* + * Square root algorithms: + * We have just one right now, maybe Newton approximation + * should be added for those machines where division is fast. + * This has special _E version because standard _4 square + * root would not work (it has to start normally with the + * second word and not the first), but as we have to do it + * anyway, we optimize it by doing most of the calculations + * in two UWtype registers instead of four. + */ + +#define _FP_SQRT_MEAT_E(R, S, T, X, q) \ + do { \ + q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \ + _FP_FRAC_SRL_4(X, (_FP_WORKBITS)); \ + while (q) \ + { \ + T##_f[1] = S##_f[1] + q; \ + if (T##_f[1] <= X##_f[1]) \ + { \ + S##_f[1] = T##_f[1] + q; \ + X##_f[1] -= T##_f[1]; \ + R##_f[1] += q; \ + } \ + _FP_FRAC_SLL_2(X, 1); \ + q >>= 1; \ + } \ + q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \ + while (q) \ + { \ + T##_f[0] = S##_f[0] + q; \ + T##_f[1] = S##_f[1]; \ + if (T##_f[1] < X##_f[1] || \ + (T##_f[1] == X##_f[1] && \ + T##_f[0] <= X##_f[0])) \ + { \ + S##_f[0] = T##_f[0] + q; \ + S##_f[1] += (T##_f[0] > S##_f[0]); \ + _FP_FRAC_DEC_2(X, T); \ + R##_f[0] += q; \ + } \ + _FP_FRAC_SLL_2(X, 1); \ + q >>= 1; \ + } \ + _FP_FRAC_SLL_4(R, (_FP_WORKBITS)); \ + if (X##_f[0] | X##_f[1]) \ + { \ + if (S##_f[1] < X##_f[1] || \ + (S##_f[1] == X##_f[1] && \ + S##_f[0] < X##_f[0])) \ + R##_f[0] |= _FP_WORK_ROUND; \ + R##_f[0] |= _FP_WORK_STICKY; \ + } \ + } while (0) + +#define FP_CMP_E(r,X,Y,un) _FP_CMP(E,4,r,X,Y,un) +#define FP_CMP_EQ_E(r,X,Y) _FP_CMP_EQ(E,4,r,X,Y) +#define FP_CMP_UNORD_E(r,X,Y) _FP_CMP_UNORD(E,4,r,X,Y) + +#define FP_TO_INT_E(r,X,rsz,rsg) _FP_TO_INT(E,4,r,X,rsz,rsg) +#define FP_FROM_INT_E(X,r,rs,rt) _FP_FROM_INT(E,4,X,r,rs,rt) + +#define _FP_FRAC_HIGH_E(X) (X##_f[2]) +#define _FP_FRAC_HIGH_RAW_E(X) (X##_f[1]) + +#else /* not _FP_W_TYPE_SIZE < 64 */ +union _FP_UNION_E +{ + XFtype flt; + struct { +#if __BYTE_ORDER == __BIG_ENDIAN + _FP_W_TYPE pad : (_FP_W_TYPE_SIZE - 1 - _FP_EXPBITS_E); + unsigned sign : 1; + unsigned exp : _FP_EXPBITS_E; + _FP_W_TYPE frac : _FP_W_TYPE_SIZE; +#else + _FP_W_TYPE frac : _FP_W_TYPE_SIZE; + unsigned exp : _FP_EXPBITS_E; + unsigned sign : 1; +#endif + } bits; +}; + +#define FP_DECL_E(X) _FP_DECL(2,X) + +#define FP_UNPACK_RAW_E(X, val) \ + do { \ + union _FP_UNION_E _flo; _flo.flt = (val); \ + \ + X##_f0 = _flo.bits.frac; \ + X##_f1 = 0; \ + X##_e = _flo.bits.exp; \ + X##_s = _flo.bits.sign; \ + } while (0) + +#define FP_UNPACK_RAW_EP(X, val) \ + do { \ + union _FP_UNION_E *_flo = \ + (union _FP_UNION_E *)(val); \ + \ + X##_f0 = _flo->bits.frac; \ + X##_f1 = 0; \ + X##_e = _flo->bits.exp; \ + X##_s = _flo->bits.sign; \ + } while (0) + +#define FP_PACK_RAW_E(val, X) \ + do { \ + union _FP_UNION_E _flo; \ + \ + if (X##_e) X##_f0 |= _FP_IMPLBIT_E; \ + else X##_f0 &= ~(_FP_IMPLBIT_E); \ + _flo.bits.frac = X##_f0; \ + _flo.bits.exp = X##_e; \ + _flo.bits.sign = X##_s; \ + \ + (val) = _flo.flt; \ + } while (0) + +#define FP_PACK_RAW_EP(fs, val, X) \ + do { \ + if (!FP_INHIBIT_RESULTS) \ + { \ + union _FP_UNION_E *_flo = \ + (union _FP_UNION_E *)(val); \ + \ + if (X##_e) 
X##_f0 |= _FP_IMPLBIT_E; \ + else X##_f0 &= ~(_FP_IMPLBIT_E); \ + _flo->bits.frac = X##_f0; \ + _flo->bits.exp = X##_e; \ + _flo->bits.sign = X##_s; \ + } \ + } while (0) + + +#define FP_UNPACK_E(X,val) \ + do { \ + FP_UNPACK_RAW_E(X,val); \ + _FP_UNPACK_CANONICAL(E,2,X); \ + } while (0) + +#define FP_UNPACK_EP(X,val) \ + do { \ + FP_UNPACK_RAW_EP(X,val); \ + _FP_UNPACK_CANONICAL(E,2,X); \ + } while (0) + +#define FP_UNPACK_SEMIRAW_E(X,val) \ + do { \ + FP_UNPACK_RAW_E(X,val); \ + _FP_UNPACK_SEMIRAW(E,2,X); \ + } while (0) + +#define FP_UNPACK_SEMIRAW_EP(X,val) \ + do { \ + FP_UNPACK_RAW_EP(X,val); \ + _FP_UNPACK_SEMIRAW(E,2,X); \ + } while (0) + +#define FP_PACK_E(val,X) \ + do { \ + _FP_PACK_CANONICAL(E,2,X); \ + FP_PACK_RAW_E(val,X); \ + } while (0) + +#define FP_PACK_EP(val,X) \ + do { \ + _FP_PACK_CANONICAL(E,2,X); \ + FP_PACK_RAW_EP(val,X); \ + } while (0) + +#define FP_PACK_SEMIRAW_E(val,X) \ + do { \ + _FP_PACK_SEMIRAW(E,2,X); \ + FP_PACK_RAW_E(val,X); \ + } while (0) + +#define FP_PACK_SEMIRAW_EP(val,X) \ + do { \ + _FP_PACK_SEMIRAW(E,2,X); \ + FP_PACK_RAW_EP(val,X); \ + } while (0) + +#define FP_ISSIGNAN_E(X) _FP_ISSIGNAN(E,2,X) +#define FP_NEG_E(R,X) _FP_NEG(E,2,R,X) +#define FP_ADD_E(R,X,Y) _FP_ADD(E,2,R,X,Y) +#define FP_SUB_E(R,X,Y) _FP_SUB(E,2,R,X,Y) +#define FP_MUL_E(R,X,Y) _FP_MUL(E,2,R,X,Y) +#define FP_DIV_E(R,X,Y) _FP_DIV(E,2,R,X,Y) +#define FP_SQRT_E(R,X) _FP_SQRT(E,2,R,X) + +/* + * Square root algorithms: + * We have just one right now, maybe Newton approximation + * should be added for those machines where division is fast. + * We optimize it by doing most of the calculations + * in one UWtype registers instead of two, although we don't + * have to. + */ +#define _FP_SQRT_MEAT_E(R, S, T, X, q) \ + do { \ + q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \ + _FP_FRAC_SRL_2(X, (_FP_WORKBITS)); \ + while (q) \ + { \ + T##_f0 = S##_f0 + q; \ + if (T##_f0 <= X##_f0) \ + { \ + S##_f0 = T##_f0 + q; \ + X##_f0 -= T##_f0; \ + R##_f0 += q; \ + } \ + _FP_FRAC_SLL_1(X, 1); \ + q >>= 1; \ + } \ + _FP_FRAC_SLL_2(R, (_FP_WORKBITS)); \ + if (X##_f0) \ + { \ + if (S##_f0 < X##_f0) \ + R##_f0 |= _FP_WORK_ROUND; \ + R##_f0 |= _FP_WORK_STICKY; \ + } \ + } while (0) + +#define FP_CMP_E(r,X,Y,un) _FP_CMP(E,2,r,X,Y,un) +#define FP_CMP_EQ_E(r,X,Y) _FP_CMP_EQ(E,2,r,X,Y) +#define FP_CMP_UNORD_E(r,X,Y) _FP_CMP_UNORD(E,2,r,X,Y) + +#define FP_TO_INT_E(r,X,rsz,rsg) _FP_TO_INT(E,2,r,X,rsz,rsg) +#define FP_FROM_INT_E(X,r,rs,rt) _FP_FROM_INT(E,2,X,r,rs,rt) + +#define _FP_FRAC_HIGH_E(X) (X##_f1) +#define _FP_FRAC_HIGH_RAW_E(X) (X##_f0) + +#endif /* not _FP_W_TYPE_SIZE < 64 */ diff --git a/libc/sysdeps/linux/sparc/soft-fp/longlong.h b/libc/sysdeps/linux/sparc/soft-fp/longlong.h new file mode 100644 index 000000000..a2f38ae2a --- /dev/null +++ b/libc/sysdeps/linux/sparc/soft-fp/longlong.h @@ -0,0 +1,1461 @@ +/* longlong.h -- definitions for mixed size 32/64 bit arithmetic. + Copyright (C) 1991, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, + 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. + + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + 02111-1307 USA. */ + +/* You have to define the following before including this file: + + UWtype -- An unsigned type, default type for operations (typically a "word") + UHWtype -- An unsigned type, at least half the size of UWtype. + UDWtype -- An unsigned type, at least twice as large a UWtype + W_TYPE_SIZE -- size in bits of UWtype + + UQItype -- Unsigned 8 bit type. + SItype, USItype -- Signed and unsigned 32 bit types. + DItype, UDItype -- Signed and unsigned 64 bit types. + + On a 32 bit machine UWtype should typically be USItype; + on a 64 bit machine, UWtype should typically be UDItype. */ + +#define __BITS4 (W_TYPE_SIZE / 4) +#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2)) +#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1)) +#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2)) + +#ifndef W_TYPE_SIZE +#define W_TYPE_SIZE 32 +#define UWtype USItype +#define UHWtype USItype +#define UDWtype UDItype +#endif + +extern const UQItype __clz_tab[256] attribute_hidden; + +/* Define auxiliary asm macros. + + 1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand) multiplies two + UWtype integers MULTIPLIER and MULTIPLICAND, and generates a two UWtype + word product in HIGH_PROD and LOW_PROD. + + 2) __umulsidi3(a,b) multiplies two UWtype integers A and B, and returns a + UDWtype product. This is just a variant of umul_ppmm. + + 3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator, + denominator) divides a UDWtype, composed by the UWtype integers + HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and places the quotient + in QUOTIENT and the remainder in REMAINDER. HIGH_NUMERATOR must be less + than DENOMINATOR for correct operation. If, in addition, the most + significant bit of DENOMINATOR must be 1, then the pre-processor symbol + UDIV_NEEDS_NORMALIZATION is defined to 1. + + 4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator, + denominator). Like udiv_qrnnd but the numbers are signed. The quotient + is rounded towards 0. + + 5) count_leading_zeros(count, x) counts the number of zero-bits from the + msb to the first nonzero bit in the UWtype X. This is the number of + steps X needs to be shifted left to set the msb. Undefined for X == 0, + unless the symbol COUNT_LEADING_ZEROS_0 is defined to some value. + + 6) count_trailing_zeros(count, x) like count_leading_zeros, but counts + from the least significant end. + + 7) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1, + high_addend_2, low_addend_2) adds two UWtype integers, composed by + HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and LOW_ADDEND_2 + respectively. The result is placed in HIGH_SUM and LOW_SUM. Overflow + (i.e. carry out) is not stored anywhere, and is lost. + + 8) sub_ddmmss(high_difference, low_difference, high_minuend, low_minuend, + high_subtrahend, low_subtrahend) subtracts two two-word UWtype integers, + composed by HIGH_MINUEND_1 and LOW_MINUEND_1, and HIGH_SUBTRAHEND_2 and + LOW_SUBTRAHEND_2 respectively. The result is placed in HIGH_DIFFERENCE + and LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere, + and is lost. + + If any of these macros are left undefined for a particular CPU, + C macros are used. */ + +/* The CPUs come in alphabetical order below. 
+ + Please add support for more CPUs here, or improve the current support + for the CPUs below! + (E.g. WE32100, IBM360.) */ + +#if defined (__GNUC__) && !defined (NO_ASM) + +/* We sometimes need to clobber "cc" with gcc2, but that would not be + understood by gcc1. Use cpp to avoid major code duplication. */ +#if __GNUC__ < 2 +#define __CLOBBER_CC +#define __AND_CLOBBER_CC +#else /* __GNUC__ >= 2 */ +#define __CLOBBER_CC : "cc" +#define __AND_CLOBBER_CC , "cc" +#endif /* __GNUC__ < 2 */ + +#if defined (__alpha) && W_TYPE_SIZE == 64 +#define umul_ppmm(ph, pl, m0, m1) \ + do { \ + UDItype __m0 = (m0), __m1 = (m1); \ + (ph) = __builtin_alpha_umulh (__m0, __m1); \ + (pl) = __m0 * __m1; \ + } while (0) +#define UMUL_TIME 46 +#ifndef LONGLONG_STANDALONE +#define udiv_qrnnd(q, r, n1, n0, d) \ + do { UDItype __r; \ + (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \ + (r) = __r; \ + } while (0) +extern UDItype __udiv_qrnnd (UDItype *, UDItype, UDItype, UDItype); +#define UDIV_TIME 220 +#endif /* LONGLONG_STANDALONE */ +#ifdef __alpha_cix__ +#define count_leading_zeros(COUNT,X) ((COUNT) = __builtin_clzl (X)) +#define count_trailing_zeros(COUNT,X) ((COUNT) = __builtin_ctzl (X)) +#define COUNT_LEADING_ZEROS_0 64 +#else +#define count_leading_zeros(COUNT,X) \ + do { \ + UDItype __xr = (X), __t, __a; \ + __t = __builtin_alpha_cmpbge (0, __xr); \ + __a = __clz_tab[__t ^ 0xff] - 1; \ + __t = __builtin_alpha_extbl (__xr, __a); \ + (COUNT) = 64 - (__clz_tab[__t] + __a*8); \ + } while (0) +#define count_trailing_zeros(COUNT,X) \ + do { \ + UDItype __xr = (X), __t, __a; \ + __t = __builtin_alpha_cmpbge (0, __xr); \ + __t = ~__t & -~__t; \ + __a = ((__t & 0xCC) != 0) * 2; \ + __a += ((__t & 0xF0) != 0) * 4; \ + __a += ((__t & 0xAA) != 0); \ + __t = __builtin_alpha_extbl (__xr, __a); \ + __a <<= 3; \ + __t &= -__t; \ + __a += ((__t & 0xCC) != 0) * 2; \ + __a += ((__t & 0xF0) != 0) * 4; \ + __a += ((__t & 0xAA) != 0); \ + (COUNT) = __a; \ + } while (0) +#endif /* __alpha_cix__ */ +#endif /* __alpha */ + +#if defined (__arc__) && W_TYPE_SIZE == 32 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + __asm__ ("add.f %1, %4, %5\n\tadc %0, %2, %3" \ + : "=r" ((USItype) (sh)), \ + "=&r" ((USItype) (sl)) \ + : "%r" ((USItype) (ah)), \ + "rIJ" ((USItype) (bh)), \ + "%r" ((USItype) (al)), \ + "rIJ" ((USItype) (bl))) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + __asm__ ("sub.f %1, %4, %5\n\tsbc %0, %2, %3" \ + : "=r" ((USItype) (sh)), \ + "=&r" ((USItype) (sl)) \ + : "r" ((USItype) (ah)), \ + "rIJ" ((USItype) (bh)), \ + "r" ((USItype) (al)), \ + "rIJ" ((USItype) (bl))) +/* Call libgcc routine. 
*/ +#define umul_ppmm(w1, w0, u, v) \ +do { \ + DWunion __w; \ + __w.ll = __umulsidi3 (u, v); \ + w1 = __w.s.high; \ + w0 = __w.s.low; \ +} while (0) +#define __umulsidi3 __umulsidi3 +UDItype __umulsidi3 (USItype, USItype); +#endif + +#if defined (__arm__) && !defined (__thumb__) && W_TYPE_SIZE == 32 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + __asm__ ("adds %1, %4, %5\n\tadc %0, %2, %3" \ + : "=r" ((USItype) (sh)), \ + "=&r" ((USItype) (sl)) \ + : "%r" ((USItype) (ah)), \ + "rI" ((USItype) (bh)), \ + "%r" ((USItype) (al)), \ + "rI" ((USItype) (bl)) __CLOBBER_CC) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + __asm__ ("subs %1, %4, %5\n\tsbc %0, %2, %3" \ + : "=r" ((USItype) (sh)), \ + "=&r" ((USItype) (sl)) \ + : "r" ((USItype) (ah)), \ + "rI" ((USItype) (bh)), \ + "r" ((USItype) (al)), \ + "rI" ((USItype) (bl)) __CLOBBER_CC) +#define umul_ppmm(xh, xl, a, b) \ +{register USItype __t0, __t1, __t2; \ + __asm__ ("%@ Inlined umul_ppmm\n" \ + " mov %2, %5, lsr #16\n" \ + " mov %0, %6, lsr #16\n" \ + " bic %3, %5, %2, lsl #16\n" \ + " bic %4, %6, %0, lsl #16\n" \ + " mul %1, %3, %4\n" \ + " mul %4, %2, %4\n" \ + " mul %3, %0, %3\n" \ + " mul %0, %2, %0\n" \ + " adds %3, %4, %3\n" \ + " addcs %0, %0, #65536\n" \ + " adds %1, %1, %3, lsl #16\n" \ + " adc %0, %0, %3, lsr #16" \ + : "=&r" ((USItype) (xh)), \ + "=r" ((USItype) (xl)), \ + "=&r" (__t0), "=&r" (__t1), "=r" (__t2) \ + : "r" ((USItype) (a)), \ + "r" ((USItype) (b)) __CLOBBER_CC );} +#define UMUL_TIME 20 +#define UDIV_TIME 100 +#endif /* __arm__ */ + +#if defined(__arm__) +/* Let gcc decide how best to implement count_leading_zeros. */ +#define count_leading_zeros(COUNT,X) ((COUNT) = __builtin_clz (X)) +#define COUNT_LEADING_ZEROS_0 32 +#endif + +#if defined (__CRIS__) && __CRIS_arch_version >= 3 +#define count_leading_zeros(COUNT, X) ((COUNT) = __builtin_clz (X)) +#if __CRIS_arch_version >= 8 +#define count_trailing_zeros(COUNT, X) ((COUNT) = __builtin_ctz (X)) +#endif +#endif /* __CRIS__ */ + +#if defined (__hppa) && W_TYPE_SIZE == 32 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + __asm__ ("add %4,%5,%1\n\taddc %2,%3,%0" \ + : "=r" ((USItype) (sh)), \ + "=&r" ((USItype) (sl)) \ + : "%rM" ((USItype) (ah)), \ + "rM" ((USItype) (bh)), \ + "%rM" ((USItype) (al)), \ + "rM" ((USItype) (bl))) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + __asm__ ("sub %4,%5,%1\n\tsubb %2,%3,%0" \ + : "=r" ((USItype) (sh)), \ + "=&r" ((USItype) (sl)) \ + : "rM" ((USItype) (ah)), \ + "rM" ((USItype) (bh)), \ + "rM" ((USItype) (al)), \ + "rM" ((USItype) (bl))) +#if defined (_PA_RISC1_1) +#define umul_ppmm(w1, w0, u, v) \ + do { \ + union \ + { \ + UDItype __f; \ + struct {USItype __w1, __w0;} __w1w0; \ + } __t; \ + __asm__ ("xmpyu %1,%2,%0" \ + : "=x" (__t.__f) \ + : "x" ((USItype) (u)), \ + "x" ((USItype) (v))); \ + (w1) = __t.__w1w0.__w1; \ + (w0) = __t.__w1w0.__w0; \ + } while (0) +#define UMUL_TIME 8 +#else +#define UMUL_TIME 30 +#endif +#define UDIV_TIME 40 +#define count_leading_zeros(count, x) \ + do { \ + USItype __tmp; \ + __asm__ ( \ + "ldi 1,%0\n" \ +" extru,= %1,15,16,%%r0 ; Bits 31..16 zero?\n" \ +" extru,tr %1,15,16,%1 ; No. Shift down, skip add.\n"\ +" ldo 16(%0),%0 ; Yes. Perform add.\n" \ +" extru,= %1,23,8,%%r0 ; Bits 15..8 zero?\n" \ +" extru,tr %1,23,8,%1 ; No. Shift down, skip add.\n"\ +" ldo 8(%0),%0 ; Yes. Perform add.\n" \ +" extru,= %1,27,4,%%r0 ; Bits 7..4 zero?\n" \ +" extru,tr %1,27,4,%1 ; No. Shift down, skip add.\n"\ +" ldo 4(%0),%0 ; Yes. 
Perform add.\n" \ +" extru,= %1,29,2,%%r0 ; Bits 3..2 zero?\n" \ +" extru,tr %1,29,2,%1 ; No. Shift down, skip add.\n"\ +" ldo 2(%0),%0 ; Yes. Perform add.\n" \ +" extru %1,30,1,%1 ; Extract bit 1.\n" \ +" sub %0,%1,%0 ; Subtract it.\n" \ + : "=r" (count), "=r" (__tmp) : "1" (x)); \ + } while (0) +#endif + +#if (defined (__i370__) || defined (__s390__) || defined (__mvs__)) && W_TYPE_SIZE == 32 +#define smul_ppmm(xh, xl, m0, m1) \ + do { \ + union {DItype __ll; \ + struct {USItype __h, __l;} __i; \ + } __x; \ + __asm__ ("lr %N0,%1\n\tmr %0,%2" \ + : "=&r" (__x.__ll) \ + : "r" (m0), "r" (m1)); \ + (xh) = __x.__i.__h; (xl) = __x.__i.__l; \ + } while (0) +#define sdiv_qrnnd(q, r, n1, n0, d) \ + do { \ + union {DItype __ll; \ + struct {USItype __h, __l;} __i; \ + } __x; \ + __x.__i.__h = n1; __x.__i.__l = n0; \ + __asm__ ("dr %0,%2" \ + : "=r" (__x.__ll) \ + : "0" (__x.__ll), "r" (d)); \ + (q) = __x.__i.__l; (r) = __x.__i.__h; \ + } while (0) +#endif + +#if (defined (__i386__) || defined (__i486__)) && W_TYPE_SIZE == 32 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + __asm__ ("add{l} {%5,%1|%1,%5}\n\tadc{l} {%3,%0|%0,%3}" \ + : "=r" ((USItype) (sh)), \ + "=&r" ((USItype) (sl)) \ + : "%0" ((USItype) (ah)), \ + "g" ((USItype) (bh)), \ + "%1" ((USItype) (al)), \ + "g" ((USItype) (bl))) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + __asm__ ("sub{l} {%5,%1|%1,%5}\n\tsbb{l} {%3,%0|%0,%3}" \ + : "=r" ((USItype) (sh)), \ + "=&r" ((USItype) (sl)) \ + : "0" ((USItype) (ah)), \ + "g" ((USItype) (bh)), \ + "1" ((USItype) (al)), \ + "g" ((USItype) (bl))) +#define umul_ppmm(w1, w0, u, v) \ + __asm__ ("mul{l} %3" \ + : "=a" ((USItype) (w0)), \ + "=d" ((USItype) (w1)) \ + : "%0" ((USItype) (u)), \ + "rm" ((USItype) (v))) +#define udiv_qrnnd(q, r, n1, n0, dv) \ + __asm__ ("div{l} %4" \ + : "=a" ((USItype) (q)), \ + "=d" ((USItype) (r)) \ + : "0" ((USItype) (n0)), \ + "1" ((USItype) (n1)), \ + "rm" ((USItype) (dv))) +#define count_leading_zeros(count, x) ((count) = __builtin_clz (x)) +#define count_trailing_zeros(count, x) ((count) = __builtin_ctz (x)) +#define UMUL_TIME 40 +#define UDIV_TIME 40 +#endif /* 80x86 */ + +#if (defined (__x86_64__) || defined (__i386__)) && W_TYPE_SIZE == 64 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + __asm__ ("add{q} {%5,%1|%1,%5}\n\tadc{q} {%3,%0|%0,%3}" \ + : "=r" ((UDItype) (sh)), \ + "=&r" ((UDItype) (sl)) \ + : "%0" ((UDItype) (ah)), \ + "rme" ((UDItype) (bh)), \ + "%1" ((UDItype) (al)), \ + "rme" ((UDItype) (bl))) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + __asm__ ("sub{q} {%5,%1|%1,%5}\n\tsbb{q} {%3,%0|%0,%3}" \ + : "=r" ((UDItype) (sh)), \ + "=&r" ((UDItype) (sl)) \ + : "0" ((UDItype) (ah)), \ + "rme" ((UDItype) (bh)), \ + "1" ((UDItype) (al)), \ + "rme" ((UDItype) (bl))) +#define umul_ppmm(w1, w0, u, v) \ + __asm__ ("mul{q} %3" \ + : "=a" ((UDItype) (w0)), \ + "=d" ((UDItype) (w1)) \ + : "%0" ((UDItype) (u)), \ + "rm" ((UDItype) (v))) +#define udiv_qrnnd(q, r, n1, n0, dv) \ + __asm__ ("div{q} %4" \ + : "=a" ((UDItype) (q)), \ + "=d" ((UDItype) (r)) \ + : "0" ((UDItype) (n0)), \ + "1" ((UDItype) (n1)), \ + "rm" ((UDItype) (dv))) +#define count_leading_zeros(count, x) ((count) = __builtin_clzl (x)) +#define count_trailing_zeros(count, x) ((count) = __builtin_ctzl (x)) +#define UMUL_TIME 40 +#define UDIV_TIME 40 +#endif /* x86_64 */ + +#if defined (__i960__) && W_TYPE_SIZE == 32 +#define umul_ppmm(w1, w0, u, v) \ + ({union {UDItype __ll; \ + struct {USItype __l, __h;} __i; \ + } __xx; \ + __asm__ ("emul %2,%1,%0" \ + : "=d" (__xx.__ll) \ + : "%dI" ((USItype) (u)), 
\ + "dI" ((USItype) (v))); \ + (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;}) +#define __umulsidi3(u, v) \ + ({UDItype __w; \ + __asm__ ("emul %2,%1,%0" \ + : "=d" (__w) \ + : "%dI" ((USItype) (u)), \ + "dI" ((USItype) (v))); \ + __w; }) +#endif /* __i960__ */ + +#if defined (__M32R__) && W_TYPE_SIZE == 32 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + /* The cmp clears the condition bit. */ \ + __asm__ ("cmp %0,%0\n\taddx %1,%5\n\taddx %0,%3" \ + : "=r" ((USItype) (sh)), \ + "=&r" ((USItype) (sl)) \ + : "0" ((USItype) (ah)), \ + "r" ((USItype) (bh)), \ + "1" ((USItype) (al)), \ + "r" ((USItype) (bl)) \ + : "cbit") +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + /* The cmp clears the condition bit. */ \ + __asm__ ("cmp %0,%0\n\tsubx %1,%5\n\tsubx %0,%3" \ + : "=r" ((USItype) (sh)), \ + "=&r" ((USItype) (sl)) \ + : "0" ((USItype) (ah)), \ + "r" ((USItype) (bh)), \ + "1" ((USItype) (al)), \ + "r" ((USItype) (bl)) \ + : "cbit") +#endif /* __M32R__ */ + +#if defined (__mc68000__) && W_TYPE_SIZE == 32 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + __asm__ ("add%.l %5,%1\n\taddx%.l %3,%0" \ + : "=d" ((USItype) (sh)), \ + "=&d" ((USItype) (sl)) \ + : "%0" ((USItype) (ah)), \ + "d" ((USItype) (bh)), \ + "%1" ((USItype) (al)), \ + "g" ((USItype) (bl))) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + __asm__ ("sub%.l %5,%1\n\tsubx%.l %3,%0" \ + : "=d" ((USItype) (sh)), \ + "=&d" ((USItype) (sl)) \ + : "0" ((USItype) (ah)), \ + "d" ((USItype) (bh)), \ + "1" ((USItype) (al)), \ + "g" ((USItype) (bl))) + +/* The '020, '030, '040, '060 and CPU32 have 32x32->64 and 64/32->32q-32r. */ +#if (defined (__mc68020__) && !defined (__mc68060__)) +#define umul_ppmm(w1, w0, u, v) \ + __asm__ ("mulu%.l %3,%1:%0" \ + : "=d" ((USItype) (w0)), \ + "=d" ((USItype) (w1)) \ + : "%0" ((USItype) (u)), \ + "dmi" ((USItype) (v))) +#define UMUL_TIME 45 +#define udiv_qrnnd(q, r, n1, n0, d) \ + __asm__ ("divu%.l %4,%1:%0" \ + : "=d" ((USItype) (q)), \ + "=d" ((USItype) (r)) \ + : "0" ((USItype) (n0)), \ + "1" ((USItype) (n1)), \ + "dmi" ((USItype) (d))) +#define UDIV_TIME 90 +#define sdiv_qrnnd(q, r, n1, n0, d) \ + __asm__ ("divs%.l %4,%1:%0" \ + : "=d" ((USItype) (q)), \ + "=d" ((USItype) (r)) \ + : "0" ((USItype) (n0)), \ + "1" ((USItype) (n1)), \ + "dmi" ((USItype) (d))) + +#elif defined (__mcoldfire__) /* not mc68020 */ + +#define umul_ppmm(xh, xl, a, b) \ + __asm__ ("| Inlined umul_ppmm\n" \ + " move%.l %2,%/d0\n" \ + " move%.l %3,%/d1\n" \ + " move%.l %/d0,%/d2\n" \ + " swap %/d0\n" \ + " move%.l %/d1,%/d3\n" \ + " swap %/d1\n" \ + " move%.w %/d2,%/d4\n" \ + " mulu %/d3,%/d4\n" \ + " mulu %/d1,%/d2\n" \ + " mulu %/d0,%/d3\n" \ + " mulu %/d0,%/d1\n" \ + " move%.l %/d4,%/d0\n" \ + " clr%.w %/d0\n" \ + " swap %/d0\n" \ + " add%.l %/d0,%/d2\n" \ + " add%.l %/d3,%/d2\n" \ + " jcc 1f\n" \ + " add%.l %#65536,%/d1\n" \ + "1: swap %/d2\n" \ + " moveq %#0,%/d0\n" \ + " move%.w %/d2,%/d0\n" \ + " move%.w %/d4,%/d2\n" \ + " move%.l %/d2,%1\n" \ + " add%.l %/d1,%/d0\n" \ + " move%.l %/d0,%0" \ + : "=g" ((USItype) (xh)), \ + "=g" ((USItype) (xl)) \ + : "g" ((USItype) (a)), \ + "g" ((USItype) (b)) \ + : "d0", "d1", "d2", "d3", "d4") +#define UMUL_TIME 100 +#define UDIV_TIME 400 +#else /* not ColdFire */ +/* %/ inserts REGISTER_PREFIX, %# inserts IMMEDIATE_PREFIX. 
*/ +#define umul_ppmm(xh, xl, a, b) \ + __asm__ ("| Inlined umul_ppmm\n" \ + " move%.l %2,%/d0\n" \ + " move%.l %3,%/d1\n" \ + " move%.l %/d0,%/d2\n" \ + " swap %/d0\n" \ + " move%.l %/d1,%/d3\n" \ + " swap %/d1\n" \ + " move%.w %/d2,%/d4\n" \ + " mulu %/d3,%/d4\n" \ + " mulu %/d1,%/d2\n" \ + " mulu %/d0,%/d3\n" \ + " mulu %/d0,%/d1\n" \ + " move%.l %/d4,%/d0\n" \ + " eor%.w %/d0,%/d0\n" \ + " swap %/d0\n" \ + " add%.l %/d0,%/d2\n" \ + " add%.l %/d3,%/d2\n" \ + " jcc 1f\n" \ + " add%.l %#65536,%/d1\n" \ + "1: swap %/d2\n" \ + " moveq %#0,%/d0\n" \ + " move%.w %/d2,%/d0\n" \ + " move%.w %/d4,%/d2\n" \ + " move%.l %/d2,%1\n" \ + " add%.l %/d1,%/d0\n" \ + " move%.l %/d0,%0" \ + : "=g" ((USItype) (xh)), \ + "=g" ((USItype) (xl)) \ + : "g" ((USItype) (a)), \ + "g" ((USItype) (b)) \ + : "d0", "d1", "d2", "d3", "d4") +#define UMUL_TIME 100 +#define UDIV_TIME 400 + +#endif /* not mc68020 */ + +/* The '020, '030, '040 and '060 have bitfield insns. + cpu32 disguises as a 68020, but lacks them. */ +#if defined (__mc68020__) && !defined (__mcpu32__) +#define count_leading_zeros(count, x) \ + __asm__ ("bfffo %1{%b2:%b2},%0" \ + : "=d" ((USItype) (count)) \ + : "od" ((USItype) (x)), "n" (0)) +/* Some ColdFire architectures have a ff1 instruction supported via + __builtin_clz. */ +#elif defined (__mcfisaaplus__) || defined (__mcfisac__) +#define count_leading_zeros(count,x) ((count) = __builtin_clz (x)) +#define COUNT_LEADING_ZEROS_0 32 +#endif +#endif /* mc68000 */ + +#if defined (__m88000__) && W_TYPE_SIZE == 32 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + __asm__ ("addu.co %1,%r4,%r5\n\taddu.ci %0,%r2,%r3" \ + : "=r" ((USItype) (sh)), \ + "=&r" ((USItype) (sl)) \ + : "%rJ" ((USItype) (ah)), \ + "rJ" ((USItype) (bh)), \ + "%rJ" ((USItype) (al)), \ + "rJ" ((USItype) (bl))) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + __asm__ ("subu.co %1,%r4,%r5\n\tsubu.ci %0,%r2,%r3" \ + : "=r" ((USItype) (sh)), \ + "=&r" ((USItype) (sl)) \ + : "rJ" ((USItype) (ah)), \ + "rJ" ((USItype) (bh)), \ + "rJ" ((USItype) (al)), \ + "rJ" ((USItype) (bl))) +#define count_leading_zeros(count, x) \ + do { \ + USItype __cbtmp; \ + __asm__ ("ff1 %0,%1" \ + : "=r" (__cbtmp) \ + : "r" ((USItype) (x))); \ + (count) = __cbtmp ^ 31; \ + } while (0) +#define COUNT_LEADING_ZEROS_0 63 /* sic */ +#if defined (__mc88110__) +#define umul_ppmm(wh, wl, u, v) \ + do { \ + union {UDItype __ll; \ + struct {USItype __h, __l;} __i; \ + } __xx; \ + __asm__ ("mulu.d %0,%1,%2" \ + : "=r" (__xx.__ll) \ + : "r" ((USItype) (u)), \ + "r" ((USItype) (v))); \ + (wh) = __xx.__i.__h; \ + (wl) = __xx.__i.__l; \ + } while (0) +#define udiv_qrnnd(q, r, n1, n0, d) \ + ({union {UDItype __ll; \ + struct {USItype __h, __l;} __i; \ + } __xx; \ + USItype __q; \ + __xx.__i.__h = (n1); __xx.__i.__l = (n0); \ + __asm__ ("divu.d %0,%1,%2" \ + : "=r" (__q) \ + : "r" (__xx.__ll), \ + "r" ((USItype) (d))); \ + (r) = (n0) - __q * (d); (q) = __q; }) +#define UMUL_TIME 5 +#define UDIV_TIME 25 +#else +#define UMUL_TIME 17 +#define UDIV_TIME 150 +#endif /* __mc88110__ */ +#endif /* __m88000__ */ + +#if defined (__mips__) && W_TYPE_SIZE == 32 +#define umul_ppmm(w1, w0, u, v) \ + do { \ + UDItype __x = (UDItype) (USItype) (u) * (USItype) (v); \ + (w1) = (USItype) (__x >> 32); \ + (w0) = (USItype) (__x); \ + } while (0) +#define UMUL_TIME 10 +#define UDIV_TIME 100 + +#if (__mips == 32 || __mips == 64) && ! 
__mips16 +#define count_leading_zeros(COUNT,X) ((COUNT) = __builtin_clz (X)) +#define COUNT_LEADING_ZEROS_0 32 +#endif +#endif /* __mips__ */ + +#if defined (__ns32000__) && W_TYPE_SIZE == 32 +#define umul_ppmm(w1, w0, u, v) \ + ({union {UDItype __ll; \ + struct {USItype __l, __h;} __i; \ + } __xx; \ + __asm__ ("meid %2,%0" \ + : "=g" (__xx.__ll) \ + : "%0" ((USItype) (u)), \ + "g" ((USItype) (v))); \ + (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;}) +#define __umulsidi3(u, v) \ + ({UDItype __w; \ + __asm__ ("meid %2,%0" \ + : "=g" (__w) \ + : "%0" ((USItype) (u)), \ + "g" ((USItype) (v))); \ + __w; }) +#define udiv_qrnnd(q, r, n1, n0, d) \ + ({union {UDItype __ll; \ + struct {USItype __l, __h;} __i; \ + } __xx; \ + __xx.__i.__h = (n1); __xx.__i.__l = (n0); \ + __asm__ ("deid %2,%0" \ + : "=g" (__xx.__ll) \ + : "0" (__xx.__ll), \ + "g" ((USItype) (d))); \ + (r) = __xx.__i.__l; (q) = __xx.__i.__h; }) +#define count_trailing_zeros(count,x) \ + do { \ + __asm__ ("ffsd %2,%0" \ + : "=r" ((USItype) (count)) \ + : "0" ((USItype) 0), \ + "r" ((USItype) (x))); \ + } while (0) +#endif /* __ns32000__ */ + +/* FIXME: We should test _IBMR2 here when we add assembly support for the + system vendor compilers. + FIXME: What's needed for gcc PowerPC VxWorks? __vxworks__ is not good + enough, since that hits ARM and m68k too. */ +#if (defined (_ARCH_PPC) /* AIX */ \ + || defined (_ARCH_PWR) /* AIX */ \ + || defined (_ARCH_COM) /* AIX */ \ + || defined (__powerpc__) /* gcc */ \ + || defined (__POWERPC__) /* BEOS */ \ + || defined (__ppc__) /* Darwin */ \ + || (defined (PPC) && ! defined (CPU_FAMILY)) /* gcc 2.7.x GNU&SysV */ \ + || (defined (PPC) && defined (CPU_FAMILY) /* VxWorks */ \ + && CPU_FAMILY == PPC) \ + ) && W_TYPE_SIZE == 32 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + do { \ + if (__builtin_constant_p (bh) && (bh) == 0) \ + __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\ + else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \ + __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\ + else \ + __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \ + : "=r" (sh), "=&r" (sl) \ + : "%r" (ah), "r" (bh), "%r" (al), "rI" (bl)); \ + } while (0) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + do { \ + if (__builtin_constant_p (ah) && (ah) == 0) \ + __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\ + else if (__builtin_constant_p (ah) && (ah) == ~(USItype) 0) \ + __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\ + else if (__builtin_constant_p (bh) && (bh) == 0) \ + __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\ + else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \ + __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\ + else \ + __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \ + : "=r" (sh), "=&r" (sl) \ + : "r" (ah), "r" (bh), "rI" (al), "r" (bl)); \ + } while (0) +#define count_leading_zeros(count, x) \ + __asm__ ("{cntlz|cntlzw} %0,%1" : "=r" (count) : "r" (x)) +#define COUNT_LEADING_ZEROS_0 32 +#if defined (_ARCH_PPC) || defined (__powerpc__) || defined (__POWERPC__) \ + || defined (__ppc__) \ + || (defined (PPC) && ! 
defined (CPU_FAMILY)) /* gcc 2.7.x GNU&SysV */ \ + || (defined (PPC) && defined (CPU_FAMILY) /* VxWorks */ \ + && CPU_FAMILY == PPC) +#define umul_ppmm(ph, pl, m0, m1) \ + do { \ + USItype __m0 = (m0), __m1 = (m1); \ + __asm__ ("mulhwu %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1)); \ + (pl) = __m0 * __m1; \ + } while (0) +#define UMUL_TIME 15 +#define smul_ppmm(ph, pl, m0, m1) \ + do { \ + SItype __m0 = (m0), __m1 = (m1); \ + __asm__ ("mulhw %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1)); \ + (pl) = __m0 * __m1; \ + } while (0) +#define SMUL_TIME 14 +#define UDIV_TIME 120 +#elif defined (_ARCH_PWR) +#define UMUL_TIME 8 +#define smul_ppmm(xh, xl, m0, m1) \ + __asm__ ("mul %0,%2,%3" : "=r" (xh), "=q" (xl) : "r" (m0), "r" (m1)) +#define SMUL_TIME 4 +#define sdiv_qrnnd(q, r, nh, nl, d) \ + __asm__ ("div %0,%2,%4" : "=r" (q), "=q" (r) : "r" (nh), "1" (nl), "r" (d)) +#define UDIV_TIME 100 +#endif +#endif /* 32-bit POWER architecture variants. */ + +/* We should test _IBMR2 here when we add assembly support for the system + vendor compilers. */ +#if (defined (_ARCH_PPC64) || defined (__powerpc64__)) && W_TYPE_SIZE == 64 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + do { \ + if (__builtin_constant_p (bh) && (bh) == 0) \ + __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\ + else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0) \ + __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\ + else \ + __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \ + : "=r" (sh), "=&r" (sl) \ + : "%r" (ah), "r" (bh), "%r" (al), "rI" (bl)); \ + } while (0) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + do { \ + if (__builtin_constant_p (ah) && (ah) == 0) \ + __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\ + else if (__builtin_constant_p (ah) && (ah) == ~(UDItype) 0) \ + __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\ + else if (__builtin_constant_p (bh) && (bh) == 0) \ + __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\ + else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0) \ + __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\ + else \ + __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \ + : "=r" (sh), "=&r" (sl) \ + : "r" (ah), "r" (bh), "rI" (al), "r" (bl)); \ + } while (0) +#define count_leading_zeros(count, x) \ + __asm__ ("cntlzd %0,%1" : "=r" (count) : "r" (x)) +#define COUNT_LEADING_ZEROS_0 64 +#define umul_ppmm(ph, pl, m0, m1) \ + do { \ + UDItype __m0 = (m0), __m1 = (m1); \ + __asm__ ("mulhdu %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1)); \ + (pl) = __m0 * __m1; \ + } while (0) +#define UMUL_TIME 15 +#define smul_ppmm(ph, pl, m0, m1) \ + do { \ + DItype __m0 = (m0), __m1 = (m1); \ + __asm__ ("mulhd %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1)); \ + (pl) = __m0 * __m1; \ + } while (0) +#define SMUL_TIME 14 /* ??? */ +#define UDIV_TIME 120 /* ??? */ +#endif /* 64-bit PowerPC. 
*/ + +#if defined (__ibm032__) /* RT/ROMP */ && W_TYPE_SIZE == 32 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + __asm__ ("a %1,%5\n\tae %0,%3" \ + : "=r" ((USItype) (sh)), \ + "=&r" ((USItype) (sl)) \ + : "%0" ((USItype) (ah)), \ + "r" ((USItype) (bh)), \ + "%1" ((USItype) (al)), \ + "r" ((USItype) (bl))) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + __asm__ ("s %1,%5\n\tse %0,%3" \ + : "=r" ((USItype) (sh)), \ + "=&r" ((USItype) (sl)) \ + : "0" ((USItype) (ah)), \ + "r" ((USItype) (bh)), \ + "1" ((USItype) (al)), \ + "r" ((USItype) (bl))) +#define umul_ppmm(ph, pl, m0, m1) \ + do { \ + USItype __m0 = (m0), __m1 = (m1); \ + __asm__ ( \ + "s r2,r2\n" \ +" mts r10,%2\n" \ +" m r2,%3\n" \ +" m r2,%3\n" \ +" m r2,%3\n" \ +" m r2,%3\n" \ +" m r2,%3\n" \ +" m r2,%3\n" \ +" m r2,%3\n" \ +" m r2,%3\n" \ +" m r2,%3\n" \ +" m r2,%3\n" \ +" m r2,%3\n" \ +" m r2,%3\n" \ +" m r2,%3\n" \ +" m r2,%3\n" \ +" m r2,%3\n" \ +" m r2,%3\n" \ +" cas %0,r2,r0\n" \ +" mfs r10,%1" \ + : "=r" ((USItype) (ph)), \ + "=r" ((USItype) (pl)) \ + : "%r" (__m0), \ + "r" (__m1) \ + : "r2"); \ + (ph) += ((((SItype) __m0 >> 31) & __m1) \ + + (((SItype) __m1 >> 31) & __m0)); \ + } while (0) +#define UMUL_TIME 20 +#define UDIV_TIME 200 +#define count_leading_zeros(count, x) \ + do { \ + if ((x) >= 0x10000) \ + __asm__ ("clz %0,%1" \ + : "=r" ((USItype) (count)) \ + : "r" ((USItype) (x) >> 16)); \ + else \ + { \ + __asm__ ("clz %0,%1" \ + : "=r" ((USItype) (count)) \ + : "r" ((USItype) (x))); \ + (count) += 16; \ + } \ + } while (0) +#endif + +#if defined(__sh__) && !__SHMEDIA__ && W_TYPE_SIZE == 32 +#ifndef __sh1__ +#define umul_ppmm(w1, w0, u, v) \ + __asm__ ( \ + "dmulu.l %2,%3\n\tsts%M1 macl,%1\n\tsts%M0 mach,%0" \ + : "=r<" ((USItype)(w1)), \ + "=r<" ((USItype)(w0)) \ + : "r" ((USItype)(u)), \ + "r" ((USItype)(v)) \ + : "macl", "mach") +#define UMUL_TIME 5 +#endif + +/* This is the same algorithm as __udiv_qrnnd_c. 
*/ +#define UDIV_NEEDS_NORMALIZATION 1 + +#define udiv_qrnnd(q, r, n1, n0, d) \ + do { \ + extern UWtype __udiv_qrnnd_16 (UWtype, UWtype) \ + __attribute__ ((visibility ("hidden"))); \ + /* r0: rn r1: qn */ /* r0: n1 r4: n0 r5: d r6: d1 */ /* r2: __m */ \ + __asm__ ( \ + "mov%M4 %4,r5\n" \ +" swap.w %3,r4\n" \ +" swap.w r5,r6\n" \ +" jsr @%5\n" \ +" shll16 r6\n" \ +" swap.w r4,r4\n" \ +" jsr @%5\n" \ +" swap.w r1,%0\n" \ +" or r1,%0" \ + : "=r" (q), "=&z" (r) \ + : "1" (n1), "r" (n0), "rm" (d), "r" (&__udiv_qrnnd_16) \ + : "r1", "r2", "r4", "r5", "r6", "pr"); \ + } while (0) + +#define UDIV_TIME 80 + +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + __asm__ ("clrt;subc %5,%1; subc %4,%0" \ + : "=r" (sh), "=r" (sl) \ + : "0" (ah), "1" (al), "r" (bh), "r" (bl)) + +#endif /* __sh__ */ + +#if defined (__SH5__) && __SHMEDIA__ && W_TYPE_SIZE == 32 +#define __umulsidi3(u,v) ((UDItype)(USItype)u*(USItype)v) +#define count_leading_zeros(count, x) \ + do \ + { \ + UDItype x_ = (USItype)(x); \ + SItype c_; \ + \ + __asm__ ("nsb %1, %0" : "=r" (c_) : "r" (x_)); \ + (count) = c_ - 31; \ + } \ + while (0) +#define COUNT_LEADING_ZEROS_0 32 +#endif + +#if defined (__sparc__) && !defined (__arch64__) && !defined (__sparcv9) \ + && W_TYPE_SIZE == 32 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + __asm__ ("addcc %r4,%5,%1\n\taddx %r2,%3,%0" \ + : "=r" ((USItype) (sh)), \ + "=&r" ((USItype) (sl)) \ + : "%rJ" ((USItype) (ah)), \ + "rI" ((USItype) (bh)), \ + "%rJ" ((USItype) (al)), \ + "rI" ((USItype) (bl)) \ + __CLOBBER_CC) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + __asm__ ("subcc %r4,%5,%1\n\tsubx %r2,%3,%0" \ + : "=r" ((USItype) (sh)), \ + "=&r" ((USItype) (sl)) \ + : "rJ" ((USItype) (ah)), \ + "rI" ((USItype) (bh)), \ + "rJ" ((USItype) (al)), \ + "rI" ((USItype) (bl)) \ + __CLOBBER_CC) +#if defined (__sparc_v8__) +#define umul_ppmm(w1, w0, u, v) \ + __asm__ ("umul %2,%3,%1;rd %%y,%0" \ + : "=r" ((USItype) (w1)), \ + "=r" ((USItype) (w0)) \ + : "r" ((USItype) (u)), \ + "r" ((USItype) (v))) +#define udiv_qrnnd(__q, __r, __n1, __n0, __d) \ + __asm__ ("mov %2,%%y;nop;nop;nop;udiv %3,%4,%0;umul %0,%4,%1;sub %3,%1,%1"\ + : "=&r" ((USItype) (__q)), \ + "=&r" ((USItype) (__r)) \ + : "r" ((USItype) (__n1)), \ + "r" ((USItype) (__n0)), \ + "r" ((USItype) (__d))) +#else +#if defined (__sparclite__) +/* This has hardware multiply but not divide. It also has two additional + instructions scan (ffs from high bit) and divscc. */ +#define umul_ppmm(w1, w0, u, v) \ + __asm__ ("umul %2,%3,%1;rd %%y,%0" \ + : "=r" ((USItype) (w1)), \ + "=r" ((USItype) (w0)) \ + : "r" ((USItype) (u)), \ + "r" ((USItype) (v))) +#define udiv_qrnnd(q, r, n1, n0, d) \ + __asm__ ("! Inlined udiv_qrnnd\n" \ +" wr %%g0,%2,%%y ! Not a delayed write for sparclite\n" \ +" tst %%g0\n" \ +" divscc %3,%4,%%g1\n" \ +" divscc %%g1,%4,%%g1\n" \ +" divscc %%g1,%4,%%g1\n" \ +" divscc %%g1,%4,%%g1\n" \ +" divscc %%g1,%4,%%g1\n" \ +" divscc %%g1,%4,%%g1\n" \ +" divscc %%g1,%4,%%g1\n" \ +" divscc %%g1,%4,%%g1\n" \ +" divscc %%g1,%4,%%g1\n" \ +" divscc %%g1,%4,%%g1\n" \ +" divscc %%g1,%4,%%g1\n" \ +" divscc %%g1,%4,%%g1\n" \ +" divscc %%g1,%4,%%g1\n" \ +" divscc %%g1,%4,%%g1\n" \ +" divscc %%g1,%4,%%g1\n" \ +" divscc %%g1,%4,%%g1\n" \ +" divscc %%g1,%4,%%g1\n" \ +" divscc %%g1,%4,%%g1\n" \ +" divscc %%g1,%4,%%g1