author    Mike Frysinger <vapier@gentoo.org>    2006-02-04 01:20:54 +0000
committer Mike Frysinger <vapier@gentoo.org>    2006-02-04 01:20:54 +0000
commit    c429bf3057c87dd9545fc2d824b5930c52f7dfb8 (patch)
tree      f207534c6a7a16795f8e7a099c1b48180e9db94f /libc
parent    798669dc65077f81b6facd2415463a1bda13c4d7 (diff)
import files from glibc for an ia64/static port
Diffstat (limited to 'libc')
51 files changed, 5209 insertions, 0 deletions
diff --git a/libc/string/ia64/Makefile b/libc/string/ia64/Makefile
new file mode 100644
index 000000000..0a95346fd
--- /dev/null
+++ b/libc/string/ia64/Makefile
@@ -0,0 +1,13 @@
# Makefile for uClibc
#
# Copyright (C) 2000-2005 Erik Andersen <andersen@uclibc.org>
#
# Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
#

top_srcdir:=../../../
top_builddir:=../../../
all: objs
include $(top_builddir)Rules.mak
include ../Makefile.in
include $(top_srcdir)Makerules
diff --git a/libc/string/ia64/bcopy.S b/libc/string/ia64/bcopy.S
new file mode 100644
index 000000000..c4eb22b1f
--- /dev/null
+++ b/libc/string/ia64/bcopy.S
@@ -0,0 +1,10 @@
#include "sysdep.h"

ENTRY(bcopy)
 .regstk 3, 0, 0, 0
 mov r8 = in0 // bcopy(src, dest, n): swap the first two args,
 mov in0 = in1 // then tail-call memmove(dest, src, n)
 ;;
 mov in1 = r8
 br.cond.sptk.many HIDDEN_JUMPTARGET(memmove)
END(bcopy)
diff --git a/libc/string/ia64/bzero.S b/libc/string/ia64/bzero.S
new file mode 100644
index 000000000..bcca41d5e
--- /dev/null
+++ b/libc/string/ia64/bzero.S
@@ -0,0 +1,315 @@
/* Optimized version of the standard bzero() function.
   This file is part of the GNU C Library.
   Copyright (C) 2000, 2001, 2002 Free Software Foundation, Inc.
   Contributed by Dan Pop for Itanium <Dan.Pop@cern.ch>.
   Rewritten for McKinley by Sverre Jarp, HP Labs/CERN <Sverre.Jarp@cern.ch>

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

/* Return: dest

   Inputs:
        in0:    dest
        in1:    count

   The algorithm is fairly straightforward: set byte by byte until we
   get to a 16B-aligned address, then loop on 128B chunks using an
   early store as prefetching, then loop on 32B chunks, then clear remaining
   words, finally clear remaining bytes.
   Since a stf.spill f0 can store 16B in one go, we use this instruction
   to get peak speed.
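
   As a rough C sketch of that cascade (illustrative only; the function
   name is ours, and the real code below uses stf.spill f0 for the wide
   stores plus predicated st8/st4/st2/st1 for the edges):

       #include <stddef.h>
       #include <stdint.h>

       static void bzero_outline(void *s, size_t n)
       {
           unsigned char *p = s;
           // byte stores until the pointer is 16B-aligned
           while (n && ((uintptr_t)p & 15)) { *p++ = 0; n--; }
           // 128B (cache line) chunks; the asm also issues one spill a
           // full line ahead of the fill loop as a software prefetch
           while (n >= 128) {
               for (int i = 0; i < 128; i += 8)
                   *(uint64_t *)(p + i) = 0;    // asm: paired 16B spills
               p += 128; n -= 128;
           }
           // 32B chunks, then remaining words, then remaining bytes
           while (n >= 32) {
               *(uint64_t *)p = 0;        *(uint64_t *)(p + 8) = 0;
               *(uint64_t *)(p + 16) = 0; *(uint64_t *)(p + 24) = 0;
               p += 32; n -= 32;
           }
           while (n >= 8) { *(uint64_t *)p = 0; p += 8; n -= 8; }
           while (n)      { *p++ = 0; n--; }
       }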
*/ + +#include <sysdep.h> +#undef ret + +#define dest in0 +#define cnt in1 + +#define tmp r31 +#define save_lc r30 +#define ptr0 r29 +#define ptr1 r28 +#define ptr2 r27 +#define ptr3 r26 +#define ptr9 r24 +#define loopcnt r23 +#define linecnt r22 +#define bytecnt r21 + +// This routine uses only scratch predicate registers (p6 - p15) +#define p_scr p6 // default register for same-cycle branches +#define p_unalgn p9 +#define p_y p11 +#define p_n p12 +#define p_yy p13 +#define p_nn p14 + +#define movi0 mov + +#define MIN1 15 +#define MIN1P1HALF 8 +#define LINE_SIZE 128 +#define LSIZE_SH 7 // shift amount +#define PREF_AHEAD 8 + +#define USE_FLP +#if defined(USE_INT) +#define store st8 +#define myval r0 +#elif defined(USE_FLP) +#define store stf8 +#define myval f0 +#endif + +.align 64 +ENTRY(bzero) +{ .mmi + .prologue + alloc tmp = ar.pfs, 2, 0, 0, 0 + lfetch.nt1 [dest] + .save ar.lc, save_lc + movi0 save_lc = ar.lc +} { .mmi + .body + mov ret0 = dest // return value + nop.m 0 + cmp.eq p_scr, p0 = cnt, r0 +;; } +{ .mmi + and ptr2 = -(MIN1+1), dest // aligned address + and tmp = MIN1, dest // prepare to check for alignment + tbit.nz p_y, p_n = dest, 0 // Do we have an odd address? (M_B_U) +} { .mib + mov ptr1 = dest + nop.i 0 +(p_scr) br.ret.dpnt.many rp // return immediately if count = 0 +;; } +{ .mib + cmp.ne p_unalgn, p0 = tmp, r0 +} { .mib // NB: # of bytes to move is 1 + sub bytecnt = (MIN1+1), tmp // higher than loopcnt + cmp.gt p_scr, p0 = 16, cnt // is it a minimalistic task? +(p_scr) br.cond.dptk.many .move_bytes_unaligned // go move just a few (M_B_U) +;; } +{ .mmi +(p_unalgn) add ptr1 = (MIN1+1), ptr2 // after alignment +(p_unalgn) add ptr2 = MIN1P1HALF, ptr2 // after alignment +(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 3 // should we do a st8 ? +;; } +{ .mib +(p_y) add cnt = -8, cnt +(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 2 // should we do a st4 ? +} { .mib +(p_y) st8 [ptr2] = r0,-4 +(p_n) add ptr2 = 4, ptr2 +;; } +{ .mib +(p_yy) add cnt = -4, cnt +(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 1 // should we do a st2 ? +} { .mib +(p_yy) st4 [ptr2] = r0,-2 +(p_nn) add ptr2 = 2, ptr2 +;; } +{ .mmi + mov tmp = LINE_SIZE+1 // for compare +(p_y) add cnt = -2, cnt +(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 0 // should we do a st1 ? +} { .mmi + nop.m 0 +(p_y) st2 [ptr2] = r0,-1 +(p_n) add ptr2 = 1, ptr2 +;; } + +{ .mmi +(p_yy) st1 [ptr2] = r0 + cmp.gt p_scr, p0 = tmp, cnt // is it a minimalistic task? 
+} { .mbb +(p_yy) add cnt = -1, cnt +(p_scr) br.cond.dpnt.many .fraction_of_line // go move just a few +;; } +{ .mib + nop.m 0 + shr.u linecnt = cnt, LSIZE_SH + nop.b 0 +;; } + + .align 32 +.l1b: // ------------------// L1B: store ahead into cache lines; fill later +{ .mmi + and tmp = -(LINE_SIZE), cnt // compute end of range + mov ptr9 = ptr1 // used for prefetching + and cnt = (LINE_SIZE-1), cnt // remainder +} { .mmi + mov loopcnt = PREF_AHEAD-1 // default prefetch loop + cmp.gt p_scr, p0 = PREF_AHEAD, linecnt // check against actual value +;; } +{ .mmi +(p_scr) add loopcnt = -1, linecnt + add ptr2 = 16, ptr1 // start of stores (beyond prefetch stores) + add ptr1 = tmp, ptr1 // first address beyond total range +;; } +{ .mmi + add tmp = -1, linecnt // next loop count + movi0 ar.lc = loopcnt +;; } +.pref_l1b: +{ .mib + stf.spill [ptr9] = f0, 128 // Do stores one cache line apart + nop.i 0 + br.cloop.dptk.few .pref_l1b +;; } +{ .mmi + add ptr0 = 16, ptr2 // Two stores in parallel + movi0 ar.lc = tmp +;; } +.l1bx: + { .mmi + stf.spill [ptr2] = f0, 32 + stf.spill [ptr0] = f0, 32 + ;; } + { .mmi + stf.spill [ptr2] = f0, 32 + stf.spill [ptr0] = f0, 32 + ;; } + { .mmi + stf.spill [ptr2] = f0, 32 + stf.spill [ptr0] = f0, 64 + cmp.lt p_scr, p0 = ptr9, ptr1 // do we need more prefetching? + ;; } +{ .mmb + stf.spill [ptr2] = f0, 32 +(p_scr) stf.spill [ptr9] = f0, 128 + br.cloop.dptk.few .l1bx +;; } +{ .mib + cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ? +(p_scr) br.cond.dpnt.many .move_bytes_from_alignment +;; } + +.fraction_of_line: +{ .mib + add ptr2 = 16, ptr1 + shr.u loopcnt = cnt, 5 // loopcnt = cnt / 32 +;; } +{ .mib + cmp.eq p_scr, p0 = loopcnt, r0 + add loopcnt = -1, loopcnt +(p_scr) br.cond.dpnt.many .store_words +;; } +{ .mib + and cnt = 0x1f, cnt // compute the remaining cnt + movi0 ar.lc = loopcnt +;; } + .align 32 +.l2: // -----------------------------// L2A: store 32B in 2 cycles +{ .mmb + store [ptr1] = myval, 8 + store [ptr2] = myval, 8 +;; } { .mmb + store [ptr1] = myval, 24 + store [ptr2] = myval, 24 + br.cloop.dptk.many .l2 +;; } +.store_words: +{ .mib + cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ? +(p_scr) br.cond.dpnt.many .move_bytes_from_alignment // Branch +;; } + +{ .mmi + store [ptr1] = myval, 8 // store + cmp.le p_y, p_n = 16, cnt // + add cnt = -8, cnt // subtract +;; } +{ .mmi +(p_y) store [ptr1] = myval, 8 // store +(p_y) cmp.le.unc p_yy, p_nn = 16, cnt +(p_y) add cnt = -8, cnt // subtract +;; } +{ .mmi // store +(p_yy) store [ptr1] = myval, 8 +(p_yy) add cnt = -8, cnt // subtract +;; } + +.move_bytes_from_alignment: +{ .mib + cmp.eq p_scr, p0 = cnt, r0 + tbit.nz.unc p_y, p0 = cnt, 2 // should we terminate with a st4 ? +(p_scr) br.cond.dpnt.few .restore_and_exit +;; } +{ .mib +(p_y) st4 [ptr1] = r0,4 + tbit.nz.unc p_yy, p0 = cnt, 1 // should we terminate with a st2 ? +;; } +{ .mib +(p_yy) st2 [ptr1] = r0,2 + tbit.nz.unc p_y, p0 = cnt, 0 // should we terminate with a st1 ? 
;; }

{ .mib
(p_y) st1 [ptr1] = r0
;; }
.restore_and_exit:
{ .mib
 nop.m 0
 movi0 ar.lc = save_lc
 br.ret.sptk.many rp
;; }

.move_bytes_unaligned:
{ .mmi
 .pred.rel "mutex",p_y, p_n
 .pred.rel "mutex",p_yy, p_nn
(p_n) cmp.le p_yy, p_nn = 4, cnt
(p_y) cmp.le p_yy, p_nn = 5, cnt
(p_n) add ptr2 = 2, ptr1
} { .mmi
(p_y) add ptr2 = 3, ptr1
(p_y) st1 [ptr1] = r0, 1 // fill 1 (odd-aligned) byte
(p_y) add cnt = -1, cnt // [15, 14 (or less) left]
;; }
{ .mmi
(p_yy) cmp.le.unc p_y, p0 = 8, cnt
 add ptr3 = ptr1, cnt // prepare last store
 movi0 ar.lc = save_lc
} { .mmi
(p_yy) st2 [ptr1] = r0, 4 // fill 2 (aligned) bytes
(p_yy) st2 [ptr2] = r0, 4 // fill 2 (aligned) bytes
(p_yy) add cnt = -4, cnt // [11, 10 (or less) left]
;; }
{ .mmi
(p_y) cmp.le.unc p_yy, p0 = 8, cnt
 add ptr3 = -1, ptr3 // last store
 tbit.nz p_scr, p0 = cnt, 1 // will there be a st2 at the end ?
} { .mmi
(p_y) st2 [ptr1] = r0, 4 // fill 2 (aligned) bytes
(p_y) st2 [ptr2] = r0, 4 // fill 2 (aligned) bytes
(p_y) add cnt = -4, cnt // [7, 6 (or less) left]
;; }
{ .mmi
(p_yy) st2 [ptr1] = r0, 4 // fill 2 (aligned) bytes
(p_yy) st2 [ptr2] = r0, 4 // fill 2 (aligned) bytes
 // [3, 2 (or less) left]
 tbit.nz p_y, p0 = cnt, 0 // will there be a st1 at the end ?
} { .mmi
(p_yy) add cnt = -4, cnt
;; }
{ .mmb
(p_scr) st2 [ptr1] = r0 // fill 2 (aligned) bytes
(p_y) st1 [ptr3] = r0 // fill last byte (using ptr3)
 br.ret.sptk.many rp
;; }
END(bzero)
diff --git a/libc/string/ia64/memccpy.S b/libc/string/ia64/memccpy.S
new file mode 100644
index 000000000..53c43c512
--- /dev/null
+++ b/libc/string/ia64/memccpy.S
@@ -0,0 +1,213 @@
/* Optimized version of the memccpy() function.
   This file is part of the GNU C Library.
   Copyright (C) 2000, 2001, 2003 Free Software Foundation, Inc.
   Contributed by Dan Pop <Dan.Pop@cern.ch>.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

/* Return: a pointer to the next byte after char in dest or NULL

   Inputs:
        in0:    dest
        in1:    src
        in2:    char
        in3:    byte count

   This implementation assumes little endian mode (UM.be = 0).

   This implementation assumes that it is safe to do read ahead
   in the src block, without getting beyond its limit.
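
   Functionally this matches the usual memccpy contract; a plain
   byte-by-byte C equivalent (illustrative only, the name is ours) is:

       #include <stddef.h>

       static void *memccpy_ref(void *dest, const void *src, int c, size_t n)
       {
           unsigned char *d = dest;
           const unsigned char *s = src;
           while (n--) {
               unsigned char b = *s++;
               *d++ = b;
               if (b == (unsigned char)c)
                   return d;       // next byte after the stored c
           }
           return NULL;            // c not seen in the first n bytes
       }

   The optimized code below instead copies a word at a time, spotting
   char inside a word with the same xor trick as memchr, and uses
   speculative loads (ld8.s) validated by chk.s, falling back to the
   .recovery1/.recovery2 paths when the speculation fails.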
*/

#include <sysdep.h>
#undef ret

#define OP_T_THRES 16
#define OPSIZ 8

#define saved_pr r17
#define saved_lc r18
#define dest r19
#define src r20
#define len r21
#define asrc r22
#define tmp r23
#define char r24
#define charx8 r25
#define saved_ec r26
#define sh2 r28
#define sh1 r29
#define loopcnt r30
#define value r31

#ifdef GAS_ALIGN_BREAKS_UNWIND_INFO
/* Manually force proper loop-alignment.  Note: be sure to
   double-check the code-layout after making any changes to
   this routine!  */
# define ALIGN(n) { nop 0 }
#else
# define ALIGN(n) .align n
#endif

ENTRY(memccpy)
 .prologue
 alloc r2 = ar.pfs, 4, 40 - 4, 0, 40

#include "softpipe.h"
 .rotr r[MEMLAT + 7], tmp1[4], tmp2[4], val[4], tmp3[2], pos0[2]
 .rotp p[MEMLAT + 6 + 1]

 mov ret0 = r0 // return NULL if no match
 .save pr, saved_pr
 mov saved_pr = pr // save the predicate registers
 mov dest = in0 // dest
 .save ar.lc, saved_lc
 mov saved_lc = ar.lc // save the loop counter
 mov saved_ec = ar.ec // save the epilog counter
 .body
 mov src = in1 // src
 extr.u char = in2, 0, 8 // char
 mov len = in3 // len
 sub tmp = r0, in0 // tmp = -dest
 cmp.ne p7, p0 = r0, r0 // clear p7
 ;;
 and loopcnt = 7, tmp // loopcnt = -dest % 8
 cmp.ge p6, p0 = OP_T_THRES, len // is len <= OP_T_THRES
 mov ar.ec = 0 // ec not guaranteed zero on entry
(p6) br.cond.spnt .cpyfew // copy byte by byte
 ;;
 cmp.eq p6, p0 = loopcnt, r0
 mux1 charx8 = char, @brcst
(p6) br.cond.sptk .dest_aligned
 sub len = len, loopcnt // len -= -dest % 8
 adds loopcnt = -1, loopcnt // --loopcnt
 ;;
 mov ar.lc = loopcnt
.l1: // copy -dest % 8 bytes
 ld1 value = [src], 1 // value = *src++
 ;;
 st1 [dest] = value, 1 // *dest++ = value
 cmp.eq p6, p0 = value, char
(p6) br.cond.spnt .foundit
 br.cloop.dptk .l1
.dest_aligned:
 and sh1 = 7, src // sh1 = src % 8
 and tmp = -8, len // tmp = len & -OPSIZ
 and asrc = -8, src // asrc = src & -OPSIZ -- align src
 shr.u loopcnt = len, 3 // loopcnt = len / 8
 and len = 7, len ;; // len = len % 8
 shl sh1 = sh1, 3 // sh1 = 8 * (src % 8)
 adds loopcnt = -1, loopcnt // --loopcnt
 mov pr.rot = 1 << 16 ;; // set rotating predicates
 sub sh2 = 64, sh1 // sh2 = 64 - sh1
 mov ar.lc = loopcnt // set LC
 cmp.eq p6, p0 = sh1, r0 // is the src aligned?
(p6) br.cond.sptk .src_aligned ;;
 add src = src, tmp // src += len & -OPSIZ
 mov ar.ec = MEMLAT + 6 + 1 // six more passes needed
 ld8 r[1] = [asrc], 8 // r[1] = w0
 cmp.ne p6, p0 = r0, r0 ;; // clear p6
 ALIGN(32)
.l2:
(p[0]) ld8.s r[0] = [asrc], 8 // r[0] = w1
(p[MEMLAT]) shr.u tmp1[0] = r[1 + MEMLAT], sh1 // tmp1 = w0 >> sh1
(p[MEMLAT]) shl tmp2[0] = r[0 + MEMLAT], sh2 // tmp2 = w1 << sh2
(p[MEMLAT+4]) xor tmp3[0] = val[1], charx8
(p[MEMLAT+5]) czx1.r pos0[0] = tmp3[1]
(p[MEMLAT+6]) chk.s r[6 + MEMLAT], .recovery1 // our data isn't
 // valid - rollback!
(p[MEMLAT+6]) cmp.ne p6, p0 = 8, pos0[1]
(p6) br.cond.spnt .gotit
(p[MEMLAT+6]) st8 [dest] = val[3], 8 // store val to dest
(p[MEMLAT+3]) or val[0] = tmp1[3], tmp2[3] // val = tmp1 | tmp2
 br.ctop.sptk .l2
 br.cond.sptk .cpyfew

.src_aligned:
 cmp.ne p6, p0 = r0, r0 // clear p6
 mov ar.ec = MEMLAT + 2 + 1 ;; // set EC
.l3:
(p[0]) ld8.s r[0] = [src], 8
(p[MEMLAT]) xor tmp3[0] = r[MEMLAT], charx8
(p[MEMLAT+1]) czx1.r pos0[0] = tmp3[1]
(p[MEMLAT+2]) cmp.ne p7, p0 = 8, pos0[1]
(p[MEMLAT+2]) chk.s r[MEMLAT+2], .recovery2
(p7) br.cond.spnt .gotit
.back2:
(p[MEMLAT+2]) st8 [dest] = r[MEMLAT+2], 8
 br.ctop.dptk .l3
.cpyfew:
 cmp.eq p6, p0 = len, r0 // is len == 0 ?
 adds len = -1, len // --len
(p6) br.cond.spnt .restore_and_exit ;;
 mov ar.lc = len
.l4:
 ld1 value = [src], 1
 ;;
 st1 [dest] = value, 1
 cmp.eq p6, p0 = value, char
(p6) br.cond.spnt .foundit
 br.cloop.dptk .l4 ;;
.foundit:
(p6) mov ret0 = dest
.restore_and_exit:
 mov pr = saved_pr, -1 // restore the predicate registers
 mov ar.lc = saved_lc // restore the loop counter
 mov ar.ec = saved_ec ;; // restore the epilog counter
 br.ret.sptk.many b0
.gotit:
 .pred.rel "mutex" p6, p7
(p6) mov value = val[3] // if coming from l2
(p7) mov value = r[MEMLAT+2] // if coming from l3
 mov ar.lc = pos0[1] ;;
.l5:
 extr.u tmp = value, 0, 8 ;;
 st1 [dest] = tmp, 1
 shr.u value = value, 8
 br.cloop.sptk .l5 ;;
 mov ret0 = dest
 mov pr = saved_pr, -1
 mov ar.lc = saved_lc
 br.ret.sptk.many b0

.recovery1:
 adds src = -(MEMLAT + 6 + 1) * 8, asrc
 mov loopcnt = ar.lc
 mov tmp = ar.ec ;;
 sub sh1 = (MEMLAT + 6 + 1), tmp
 shr.u sh2 = sh2, 3
 ;;
 shl loopcnt = loopcnt, 3
 sub src = src, sh2
 shl sh1 = sh1, 3
 shl tmp = tmp, 3
 ;;
 add len = len, loopcnt
 add src = sh1, src ;;
 add len = tmp, len
.back1:
 br.cond.sptk .cpyfew

.recovery2:
 add tmp = -(MEMLAT + 3) * 8, src
(p7) br.cond.spnt .gotit
 ;;
 ld8 r[MEMLAT+2] = [tmp] ;;
 xor pos0[1] = r[MEMLAT+2], charx8 ;;
 czx1.r pos0[1] = pos0[1] ;;
 cmp.ne p7, p6 = 8, pos0[1]
(p7) br.cond.spnt .gotit
 br.cond.sptk .back2
END(memccpy)
diff --git a/libc/string/ia64/memchr.S b/libc/string/ia64/memchr.S
new file mode 100644
index 000000000..d7742fe8a
--- /dev/null
+++ b/libc/string/ia64/memchr.S
@@ -0,0 +1,133 @@
/* Optimized version of the standard memchr() function.
   This file is part of the GNU C Library.
   Copyright (C) 2000, 2001, 2003 Free Software Foundation, Inc.
   Contributed by Dan Pop <Dan.Pop@cern.ch>.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

/* Return: the address of the first occurrence of chr in str or NULL

   Inputs:
        in0:    str
        in1:    chr
        in2:    byte count

   This implementation assumes little endian mode.  For big endian mode,
   the instruction czx1.r should be replaced by czx1.l.

   The algorithm is fairly straightforward: search byte by byte until we
   get to a word aligned address, then search word by word as much as
   possible; the remaining few bytes are searched one at a time.

   The word by word search is performed by xor-ing the word with a word
   containing chr in every byte.  If there is a hit, the result will
   contain a zero byte in the corresponding position.  The presence and
   position of that zero byte is detected with a czx instruction.

   All the loops in this function could have had the internal branch removed
   if br.ctop and br.cloop could be predicated :-(.
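
   In C, the word test amounts to the classic zero-byte bit trick (a
   sketch with a name of our choosing; the asm gets the byte position
   directly from czx1.r instead of these bit operations):

       #include <stdint.h>

       // Nonzero iff some byte of w equals c.
       static int word_has_byte(uint64_t w, unsigned char c)
       {
           uint64_t x = w ^ (0x0101010101010101ULL * c); // zero byte on hit
           return ((x - 0x0101010101010101ULL)
                   & ~x & 0x8080808080808080ULL) != 0;
       }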
*/

#include <sysdep.h>
#undef ret

#define saved_pr r15
#define saved_lc r16
#define chr r17
#define len r18
#define pos0 r20
#define val r21
#define tmp r24
#define chrx8 r25
#define loopcnt r30

#define str in0

ENTRY(__memchr)
 .prologue
 alloc r2 = ar.pfs, 3, 0, 29, 32
#include "softpipe.h"
 .rotr value[MEMLAT+1], addr[MEMLAT+3], aux[2], poschr[2]
 .rotp p[MEMLAT+3]
 .save ar.lc, saved_lc
 mov saved_lc = ar.lc // save the loop counter
 .save pr, saved_pr
 mov saved_pr = pr // save the predicates
 .body
 mov ret0 = str
 and tmp = 7, str // tmp = str % 8
 cmp.ne p7, p0 = r0, r0 // clear p7
 extr.u chr = in1, 0, 8 // chr = (unsigned char) in1
 mov len = in2
 cmp.gtu p6, p0 = 16, in2 // use a simple loop for short
(p6) br.cond.spnt .srchfew ;; // searches
 sub loopcnt = 8, tmp // loopcnt = 8 - tmp
 cmp.eq p6, p0 = tmp, r0
(p6) br.cond.sptk .str_aligned ;;
 sub len = len, loopcnt
 adds loopcnt = -1, loopcnt ;;
 mov ar.lc = loopcnt
.l1:
 ld1 val = [ret0], 1
 ;;
 cmp.eq p6, p0 = val, chr
(p6) br.cond.spnt .foundit
 br.cloop.sptk .l1 ;;
.str_aligned:
 cmp.ne p6, p0 = r0, r0 // clear p6
 shr.u loopcnt = len, 3 // loopcnt = len / 8
 and len = 7, len ;; // remaining len = len & 7
 adds loopcnt = -1, loopcnt
 mov ar.ec = MEMLAT + 3
 mux1 chrx8 = chr, @brcst ;; // get a word full of chr
 mov ar.lc = loopcnt
 mov pr.rot = 1 << 16 ;;
.l2:
(p[0]) mov addr[0] = ret0
(p[0]) ld8 value[0] = [ret0], 8
(p[MEMLAT]) xor aux[0] = value[MEMLAT], chrx8
(p[MEMLAT+1]) czx1.r poschr[0] = aux[1]
(p[MEMLAT+2]) cmp.ne p7, p0 = 8, poschr[1]
(p7) br.cond.dpnt .foundit
 br.ctop.dptk .l2
.srchfew:
 adds loopcnt = -1, len
 cmp.eq p6, p0 = len, r0
(p6) br.cond.spnt .notfound ;;
 mov ar.lc = loopcnt
.l3:
 ld1 val = [ret0], 1
 ;;
 cmp.eq p6, p0 = val, chr
(p6) br.cond.dpnt .foundit
 br.cloop.sptk .l3 ;;
.notfound:
 cmp.ne p6, p0 = r0, r0 // clear p6 (p7 was already 0 when we got here)
 mov ret0 = r0 ;; // return NULL
.foundit:
 .pred.rel "mutex" p6, p7
(p6) adds ret0 = -1, ret0 // if we got here from l1 or l3
(p7) add ret0 = addr[MEMLAT+2], poschr[1] // if we got here from l2
 mov pr = saved_pr, -1
 mov ar.lc = saved_lc
 br.ret.sptk.many b0
END(__memchr)