author    Bernhard Reutner-Fischer <rep.dot.nop@gmail.com>  2008-10-03 13:59:52 +0000
committer Bernhard Reutner-Fischer <rep.dot.nop@gmail.com>  2008-10-03 13:59:52 +0000
commit    2ba017a2d5af01cc3ef0dc554252a521e8d7c4f8 (patch)
tree      0e0db7e3fbb4fbe1be3c56ad6c80bb7d63effb93 /libc/string
parent    94bbeb72728193288f2bf071cf0e40293499045b (diff)
- use c89-style comments
Closes issue #5194
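
For context: these .S files are run through the C preprocessor before assembly, and "//" line comments are a C99/C++ feature that a strict C89 preprocessor rejects, whereas "/* */" block comments are accepted in every mode. A minimal sketch of the failure mode (hypothetical file name and compiler invocation, not part of this commit):

    /* c89_comments.c -- hypothetical demo.
       Build: gcc -std=c89 -pedantic-errors -c c89_comments.c */
    int main(void)
    {
        int count = 0;  /* C89 block comment: accepted in every mode */
        count += 1;     // C99 line comment: rejected by the flags above
        return count;
    }

GCC, for example, rejects the marked line with "C++ style comments are not allowed in ISO C90". Rewriting each "//" comment as "/* */" keeps the sources buildable with such strict toolchains without changing any generated code.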
Diffstat (limited to 'libc/string')
-rw-r--r--  libc/string/bfin/memchr.S        4
-rw-r--r--  libc/string/bfin/strcmp.S       80
-rw-r--r--  libc/string/generic/strtok_r.c  14
-rw-r--r--  libc/string/ia64/bzero.S       136
-rw-r--r--  libc/string/ia64/memccpy.S     102
-rw-r--r--  libc/string/ia64/memchr.S       32
-rw-r--r--  libc/string/ia64/memcmp.S       94
-rw-r--r--  libc/string/ia64/memcpy.S      160
-rw-r--r--  libc/string/ia64/memmove.S     170
-rw-r--r--  libc/string/ia64/memset.S      178
-rw-r--r--  libc/string/ia64/strchr.S       32
-rw-r--r--  libc/string/ia64/strcmp.S        2
-rw-r--r--  libc/string/ia64/strcpy.S       66
-rw-r--r--  libc/string/ia64/strlen.S       18
-rw-r--r--  libc/string/ia64/strncmp.S      10
-rw-r--r--  libc/string/ia64/strncpy.S      90
-rw-r--r--  libc/string/sh64/memcpy.S        2
-rw-r--r--  libc/string/sh64/memset.S       29
-rw-r--r--  libc/string/sh64/strcpy.S       28
-rw-r--r--  libc/string/xtensa/memcpy.S     22
-rw-r--r--  libc/string/xtensa/memset.S     12
-rw-r--r--  libc/string/xtensa/strcmp.S    148
-rw-r--r--  libc/string/xtensa/strcpy.S     72
-rw-r--r--  libc/string/xtensa/strlen.S     56
-rw-r--r--  libc/string/xtensa/strncpy.S   150
25 files changed, 854 insertions, 853 deletions
diff --git a/libc/string/bfin/memchr.S b/libc/string/bfin/memchr.S
index 88e46bef6..26d419f7c 100644
--- a/libc/string/bfin/memchr.S
+++ b/libc/string/bfin/memchr.S
@@ -25,8 +25,8 @@
.weak _memchr
ENTRY(_memchr)
- P0 = R0; // P0 = address
- P2 = R2; // P2 = count
+ P0 = R0; /* P0 = address */
+ P2 = R2; /* P2 = count */
R1 = R1.B(Z);
CC = R2 == 0;
IF CC JUMP .Lfailed;
diff --git a/libc/string/bfin/strcmp.S b/libc/string/bfin/strcmp.S
index 12e8c53c6..ef23aa9ab 100644
--- a/libc/string/bfin/strcmp.S
+++ b/libc/string/bfin/strcmp.S
@@ -29,66 +29,66 @@ ENTRY(_strcmp)
p1 = r0;
p2 = r1;
- p0 = -1; // (need for loop counter init)
+ p0 = -1; /* (need for loop counter init) */
- // check if byte aligned
- r0 = r0 | r1; // check both pointers at same time
- r0 <<= 30; // dump all but last 2 bits
- cc = az; // are they zero?
- if !cc jump .Lunaligned; // no; use unaligned code.
- // fall-thru for aligned case..
+ /* check if byte aligned */
+ r0 = r0 | r1; /* check both pointers at same time */
+ r0 <<= 30; /* dump all but last 2 bits */
+ cc = az; /* are they zero? */
+ if !cc jump .Lunaligned; /* no; use unaligned code. */
+ /* fall-thru for aligned case.. */
- // note that r0 is zero from the previous...
- // p0 set to -1
+ /* note that r0 is zero from the previous... */
+ /* p0 set to -1 */
LSETUP (.Lbeginloop, .Lendloop) lc0=p0;
- // pick up first words
+ /* pick up first words */
r1 = [p1++];
r2 = [p2++];
- // make up mask: 0FF0FF
+ /* make up mask: 0FF0FF */
r7 = 0xFF;
r7.h = 0xFF;
- // loop : 9 cycles to check 4 characters
+ /* loop : 9 cycles to check 4 characters */
cc = r1 == r2;
.Lbeginloop:
- if !cc jump .Lnotequal4; // compare failure, exit loop
+ if !cc jump .Lnotequal4; /* compare failure, exit loop */
- // starting with 44332211
- // see if char 3 or char 1 is 0
- r3 = r1 & r7; // form 00330011
- // add to zero, and (r2 is free, reload)
+ /* starting with 44332211 */
+ /* see if char 3 or char 1 is 0 */
+ r3 = r1 & r7; /* form 00330011 */
+ /* add to zero, and (r2 is free, reload) */
r6 = r3 +|+ r0 || r2 = [p2++] || nop;
- cc = az; // true if either is zero
- r3 = r1 ^ r3; // form 44002200 (4321^0301 => 4020)
- // (trick, saves having another mask)
- // add to zero, and (r1 is free, reload)
+ cc = az; /* true if either is zero */
+ r3 = r1 ^ r3; /* form 44002200 (4321^0301 => 4020) */
+ /* (trick, saves having another mask) */
+ /* add to zero, and (r1 is free, reload) */
r6 = r3 +|+ r0 || r1 = [p1++] || nop;
- cc |= az; // true if either is zero
- if cc jump .Lzero4; // leave if a zero somewhere
+ cc |= az; /* true if either is zero */
+ if cc jump .Lzero4; /* leave if a zero somewhere */
.Lendloop:
cc = r1 == r2;
- // loop exits
-.Lnotequal4: // compare failure on 4-char compare
- // address pointers are one word ahead;
- // faster to use zero4 exit code
+ /* loop exits */
+.Lnotequal4: /* compare failure on 4-char compare */
+ /* address pointers are one word ahead; */
+ /* faster to use zero4 exit code */
p1 += 4;
p2 += 4;
-.Lzero4: // one of the bytes in word 1 is zero
- // but we've already fetched the next word; so
- // backup two to look at failing word again
+.Lzero4: /* one of the bytes in word 1 is zero */
+ /* but we've already fetched the next word; so */
+ /* backup two to look at failing word again */
p1 += -8;
p2 += -8;
- // here when pointers are unaligned: checks one
- // character at a time. Also use at the end of
- // the word-check algorithm to figure out what happened
+ /* here when pointers are unaligned: checks one */
+ /* character at a time. Also use at the end of */
+ /* the word-check algorithm to figure out what happened */
.Lunaligned:
- // R0 is non-zero from before.
- // p0 set to -1
+ /* R0 is non-zero from before. */
+ /* p0 set to -1 */
r0 = 0 (Z);
r1 = B[p1++] (Z);
@@ -96,18 +96,18 @@ ENTRY(_strcmp)
LSETUP (.Lbeginloop1, .Lendloop1) lc0=p0;
.Lbeginloop1:
- cc = r1; // first char must be non-zero
- // chars must be the same
+ cc = r1; /* first char must be non-zero */
+ /* chars must be the same */
r3 = r2 - r1 (NS) || r1 = B[p1++] (Z) || nop;
cc &= az;
- r3 = r0 - r2; // second char must be non-zero
+ r3 = r0 - r2; /* second char must be non-zero */
cc &= an;
if !cc jump .Lexitloop1;
.Lendloop1:
r2 = B[p2++] (Z);
-.Lexitloop1: // here means we found a zero or a difference.
- // we have r2(N), p2(N), r1(N+1), p1(N+2)
+.Lexitloop1: /* here means we found a zero or a difference. */
+ /* we have r2(N), p2(N), r1(N+1), p1(N+2) */
r1=B[p1+ -2] (Z);
r0 = r1 - r2;
(r7:4) = [sp++];
diff --git a/libc/string/generic/strtok_r.c b/libc/string/generic/strtok_r.c
index d082d226e..7648212f7 100644
--- a/libc/string/generic/strtok_r.c
+++ b/libc/string/generic/strtok_r.c
@@ -29,17 +29,17 @@
# define __rawmemchr strchr
/* Experimentally off - libc_hidden_proto(strchr) */
#endif
-
-/* Parse S into tokens separated by characters in DELIM.
+#if 0
+ Parse S into tokens separated by characters in DELIM.
If S is NULL, the saved pointer in SAVE_PTR is used as
the next starting point. For example:
char s[] = "-abc-=-def";
char *sp;
- x = strtok_r(s, "-", &sp); // x = "abc", sp = "=-def"
- x = strtok_r(NULL, "-=", &sp); // x = "def", sp = NULL
- x = strtok_r(NULL, "=", &sp); // x = NULL
- // s = "abc\0-def\0"
-*/
+ x = strtok_r(s, "-", &sp); /* x = "abc", sp = "=-def" */
+ x = strtok_r(NULL, "-=", &sp); /* x = "def", sp = NULL */
+ x = strtok_r(NULL, "=", &sp); /* x = NULL */
+ /* s = "abc\0-def\0" */
+#endif
char *strtok_r (char *s, const char *delim, char **save_ptr)
{
char *token;
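
A note on the strtok_r.c hunk above: it needs a different fix than a plain comment swap. C block comments do not nest, so converting the "//" examples inside the existing "/* ... */" documentation block would have produced a "*/" that closes the outer comment early, leaving the rest of the text as code. Disabling the whole block with #if 0 / #endif sidesteps that: the preprocessor skips the region, and the block comments inside it are harmless. A minimal sketch of the pattern (illustrative only, not from the commit):

    #if 0
      Documentation with embedded examples:
      x = strtok_r(s, "-", &sp);   /* x = "abc", sp = "=-def" */
    #endif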
diff --git a/libc/string/ia64/bzero.S b/libc/string/ia64/bzero.S
index d390838a6..1f0f8b7ac 100644
--- a/libc/string/ia64/bzero.S
+++ b/libc/string/ia64/bzero.S
@@ -47,13 +47,13 @@
#define ptr1 r28
#define ptr2 r27
#define ptr3 r26
-#define ptr9 r24
+#define ptr9 r24
#define loopcnt r23
#define linecnt r22
#define bytecnt r21
-// This routine uses only scratch predicate registers (p6 - p15)
-#define p_scr p6 // default register for same-cycle branches
+/* This routine uses only scratch predicate registers (p6 - p15) */
+#define p_scr p6 /* default register for same-cycle branches */
#define p_unalgn p9
#define p_y p11
#define p_n p12
@@ -65,7 +65,7 @@
#define MIN1 15
#define MIN1P1HALF 8
#define LINE_SIZE 128
-#define LSIZE_SH 7 // shift amount
+#define LSIZE_SH 7 /* shift amount */
#define PREF_AHEAD 8
#define USE_FLP
@@ -87,49 +87,49 @@ ENTRY(bzero)
movi0 save_lc = ar.lc
} { .mmi
.body
- mov ret0 = dest // return value
+ mov ret0 = dest /* return value */
nop.m 0
cmp.eq p_scr, p0 = cnt, r0
;; }
{ .mmi
- and ptr2 = -(MIN1+1), dest // aligned address
- and tmp = MIN1, dest // prepare to check for alignment
- tbit.nz p_y, p_n = dest, 0 // Do we have an odd address? (M_B_U)
+ and ptr2 = -(MIN1+1), dest /* aligned address */
+ and tmp = MIN1, dest /* prepare to check for alignment */
+ tbit.nz p_y, p_n = dest, 0 /* Do we have an odd address? (M_B_U) */
} { .mib
mov ptr1 = dest
nop.i 0
-(p_scr) br.ret.dpnt.many rp // return immediately if count = 0
+(p_scr) br.ret.dpnt.many rp /* return immediately if count = 0 */
;; }
{ .mib
cmp.ne p_unalgn, p0 = tmp, r0
-} { .mib // NB: # of bytes to move is 1
- sub bytecnt = (MIN1+1), tmp // higher than loopcnt
- cmp.gt p_scr, p0 = 16, cnt // is it a minimalistic task?
-(p_scr) br.cond.dptk.many .move_bytes_unaligned // go move just a few (M_B_U)
+} { .mib /* NB: # of bytes to move is 1 */
+ sub bytecnt = (MIN1+1), tmp /* higher than loopcnt */
+ cmp.gt p_scr, p0 = 16, cnt /* is it a minimalistic task? */
+(p_scr) br.cond.dptk.many .move_bytes_unaligned /* go move just a few (M_B_U) */
;; }
{ .mmi
-(p_unalgn) add ptr1 = (MIN1+1), ptr2 // after alignment
-(p_unalgn) add ptr2 = MIN1P1HALF, ptr2 // after alignment
-(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 3 // should we do a st8 ?
+(p_unalgn) add ptr1 = (MIN1+1), ptr2 /* after alignment */
+(p_unalgn) add ptr2 = MIN1P1HALF, ptr2 /* after alignment */
+(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 3 /* should we do a st8 ? */
;; }
{ .mib
(p_y) add cnt = -8, cnt
-(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 2 // should we do a st4 ?
+(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 2 /* should we do a st4 ? */
} { .mib
(p_y) st8 [ptr2] = r0,-4
(p_n) add ptr2 = 4, ptr2
;; }
{ .mib
(p_yy) add cnt = -4, cnt
-(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 1 // should we do a st2 ?
+(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 1 /* should we do a st2 ? */
} { .mib
(p_yy) st4 [ptr2] = r0,-2
(p_nn) add ptr2 = 2, ptr2
;; }
{ .mmi
- mov tmp = LINE_SIZE+1 // for compare
+ mov tmp = LINE_SIZE+1 /* for compare */
(p_y) add cnt = -2, cnt
-(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 0 // should we do a st1 ?
+(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 0 /* should we do a st1 ? */
} { .mmi
nop.m 0
(p_y) st2 [ptr2] = r0,-1
@@ -138,44 +138,44 @@ ENTRY(bzero)
{ .mmi
(p_yy) st1 [ptr2] = r0
- cmp.gt p_scr, p0 = tmp, cnt // is it a minimalistic task?
+ cmp.gt p_scr, p0 = tmp, cnt /* is it a minimalistic task? */
} { .mbb
(p_yy) add cnt = -1, cnt
-(p_scr) br.cond.dpnt.many .fraction_of_line // go move just a few
+(p_scr) br.cond.dpnt.many .fraction_of_line /* go move just a few */
;; }
{ .mib
- nop.m 0
+ nop.m 0
shr.u linecnt = cnt, LSIZE_SH
nop.b 0
;; }
.align 32
-.l1b: // ------------------// L1B: store ahead into cache lines; fill later
+.l1b: /* ------------------ L1B: store ahead into cache lines; fill later */
{ .mmi
- and tmp = -(LINE_SIZE), cnt // compute end of range
- mov ptr9 = ptr1 // used for prefetching
- and cnt = (LINE_SIZE-1), cnt // remainder
+ and tmp = -(LINE_SIZE), cnt /* compute end of range */
+ mov ptr9 = ptr1 /* used for prefetching */
+ and cnt = (LINE_SIZE-1), cnt /* remainder */
} { .mmi
- mov loopcnt = PREF_AHEAD-1 // default prefetch loop
- cmp.gt p_scr, p0 = PREF_AHEAD, linecnt // check against actual value
+ mov loopcnt = PREF_AHEAD-1 /* default prefetch loop */
+ cmp.gt p_scr, p0 = PREF_AHEAD, linecnt /* check against actual value */
;; }
{ .mmi
(p_scr) add loopcnt = -1, linecnt
- add ptr2 = 16, ptr1 // start of stores (beyond prefetch stores)
- add ptr1 = tmp, ptr1 // first address beyond total range
+ add ptr2 = 16, ptr1 /* start of stores (beyond prefetch stores) */
+ add ptr1 = tmp, ptr1 /* first address beyond total range */
;; }
{ .mmi
- add tmp = -1, linecnt // next loop count
+ add tmp = -1, linecnt /* next loop count */
movi0 ar.lc = loopcnt
;; }
.pref_l1b:
{ .mib
- stf.spill [ptr9] = f0, 128 // Do stores one cache line apart
+ stf.spill [ptr9] = f0, 128 /* Do stores one cache line apart */
nop.i 0
br.cloop.dptk.few .pref_l1b
;; }
{ .mmi
- add ptr0 = 16, ptr2 // Two stores in parallel
+ add ptr0 = 16, ptr2 /* Two stores in parallel */
movi0 ar.lc = tmp
;; }
.l1bx:
@@ -190,7 +190,7 @@ ENTRY(bzero)
{ .mmi
stf.spill [ptr2] = f0, 32
stf.spill [ptr0] = f0, 64
- cmp.lt p_scr, p0 = ptr9, ptr1 // do we need more prefetching?
+ cmp.lt p_scr, p0 = ptr9, ptr1 /* do we need more prefetching? */
;; }
{ .mmb
stf.spill [ptr2] = f0, 32
@@ -198,14 +198,14 @@ ENTRY(bzero)
br.cloop.dptk.few .l1bx
;; }
{ .mib
- cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ?
+ cmp.gt p_scr, p0 = 8, cnt /* just a few bytes left ? */
(p_scr) br.cond.dpnt.many .move_bytes_from_alignment
;; }
.fraction_of_line:
{ .mib
add ptr2 = 16, ptr1
- shr.u loopcnt = cnt, 5 // loopcnt = cnt / 32
+ shr.u loopcnt = cnt, 5 /* loopcnt = cnt / 32 */
;; }
{ .mib
cmp.eq p_scr, p0 = loopcnt, r0
@@ -213,11 +213,11 @@ ENTRY(bzero)
(p_scr) br.cond.dpnt.many .store_words
;; }
{ .mib
- and cnt = 0x1f, cnt // compute the remaining cnt
+ and cnt = 0x1f, cnt /* compute the remaining cnt */
movi0 ar.lc = loopcnt
;; }
.align 32
-.l2: // -----------------------------// L2A: store 32B in 2 cycles
+.l2: /* ----------------------------- L2A: store 32B in 2 cycles */
{ .mmb
store [ptr1] = myval, 8
store [ptr2] = myval, 8
@@ -228,38 +228,38 @@ ENTRY(bzero)
;; }
.store_words:
{ .mib
- cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ?
-(p_scr) br.cond.dpnt.many .move_bytes_from_alignment // Branch
+ cmp.gt p_scr, p0 = 8, cnt /* just a few bytes left ? */
+(p_scr) br.cond.dpnt.many .move_bytes_from_alignment /* Branch */
;; }
{ .mmi
- store [ptr1] = myval, 8 // store
- cmp.le p_y, p_n = 16, cnt //
- add cnt = -8, cnt // subtract
+ store [ptr1] = myval, 8 /* store */
+ cmp.le p_y, p_n = 16, cnt /* */
+ add cnt = -8, cnt /* subtract */
;; }
{ .mmi
-(p_y) store [ptr1] = myval, 8 // store
+(p_y) store [ptr1] = myval, 8 /* store */
(p_y) cmp.le.unc p_yy, p_nn = 16, cnt
-(p_y) add cnt = -8, cnt // subtract
+(p_y) add cnt = -8, cnt /* subtract */
;; }
-{ .mmi // store
+{ .mmi /* store */
(p_yy) store [ptr1] = myval, 8
-(p_yy) add cnt = -8, cnt // subtract
+(p_yy) add cnt = -8, cnt /* subtract */
;; }
.move_bytes_from_alignment:
{ .mib
cmp.eq p_scr, p0 = cnt, r0
- tbit.nz.unc p_y, p0 = cnt, 2 // should we terminate with a st4 ?
+ tbit.nz.unc p_y, p0 = cnt, 2 /* should we terminate with a st4 ? */
(p_scr) br.cond.dpnt.few .restore_and_exit
;; }
{ .mib
(p_y) st4 [ptr1] = r0,4
- tbit.nz.unc p_yy, p0 = cnt, 1 // should we terminate with a st2 ?
+ tbit.nz.unc p_yy, p0 = cnt, 1 /* should we terminate with a st2 ? */
;; }
{ .mib
(p_yy) st2 [ptr1] = r0,2
- tbit.nz.unc p_y, p0 = cnt, 0 // should we terminate with a st1 ?
+ tbit.nz.unc p_y, p0 = cnt, 0 /* should we terminate with a st1 ? */
;; }
{ .mib
@@ -281,38 +281,38 @@ ENTRY(bzero)
(p_n) add ptr2 = 2, ptr1
} { .mmi
(p_y) add ptr2 = 3, ptr1
-(p_y) st1 [ptr1] = r0, 1 // fill 1 (odd-aligned) byte
-(p_y) add cnt = -1, cnt // [15, 14 (or less) left]
+(p_y) st1 [ptr1] = r0, 1 /* fill 1 (odd-aligned) byte */
+(p_y) add cnt = -1, cnt /* [15, 14 (or less) left] */
;; }
{ .mmi
(p_yy) cmp.le.unc p_y, p0 = 8, cnt
- add ptr3 = ptr1, cnt // prepare last store
+ add ptr3 = ptr1, cnt /* prepare last store */
movi0 ar.lc = save_lc
} { .mmi
-(p_yy) st2 [ptr1] = r0, 4 // fill 2 (aligned) bytes
-(p_yy) st2 [ptr2] = r0, 4 // fill 2 (aligned) bytes
-(p_yy) add cnt = -4, cnt // [11, 10 (o less) left]
+(p_yy) st2 [ptr1] = r0, 4 /* fill 2 (aligned) bytes */
+(p_yy) st2 [ptr2] = r0, 4 /* fill 2 (aligned) bytes */
+(p_yy) add cnt = -4, cnt /* [11, 10 (o less) left] */
;; }
{ .mmi
(p_y) cmp.le.unc p_yy, p0 = 8, cnt
- add ptr3 = -1, ptr3 // last store
- tbit.nz p_scr, p0 = cnt, 1 // will there be a st2 at the end ?
+ add ptr3 = -1, ptr3 /* last store */
+ tbit.nz p_scr, p0 = cnt, 1 /* will there be a st2 at the end ? */
} { .mmi
-(p_y) st2 [ptr1] = r0, 4 // fill 2 (aligned) bytes
-(p_y) st2 [ptr2] = r0, 4 // fill 2 (aligned) bytes
-(p_y) add cnt = -4, cnt // [7, 6 (or less) left]
+(p_y) st2 [ptr1] = r0, 4 /* fill 2 (aligned) bytes */
+(p_y) st2 [ptr2] = r0, 4 /* fill 2 (aligned) bytes */
+(p_y) add cnt = -4, cnt /* [7, 6 (or less) left] */
;; }
{ .mmi
-(p_yy) st2 [ptr1] = r0, 4 // fill 2 (aligned) bytes
-(p_yy) st2 [ptr2] = r0, 4 // fill 2 (aligned) bytes
- // [3, 2 (or less) left]
- tbit.nz p_y, p0 = cnt, 0 // will there be a st1 at the end ?
+(p_yy) st2 [ptr1] = r0, 4 /* fill 2 (aligned) bytes */
+(p_yy) st2 [ptr2] = r0, 4 /* fill 2 (aligned) bytes */
+ /* [3, 2 (or less) left] */
+ tbit.nz p_y, p0 = cnt, 0 /* will there be a st1 at the end ? */
} { .mmi
(p_yy) add cnt = -4, cnt
;; }
{ .mmb
-(p_scr) st2 [ptr1] = r0 // fill 2 (aligned) bytes
-(p_y) st1 [ptr3] = r0 // fill last byte (using ptr3)
+(p_scr) st2 [ptr1] = r0 /* fill 2 (aligned) bytes */
+(p_y) st1 [ptr3] = r0 /* fill last byte (using ptr3) */
br.ret.sptk.many rp
;; }
END(bzero)
diff --git a/libc/string/ia64/memccpy.S b/libc/string/ia64/memccpy.S
index 1afba3637..259d680bc 100644
--- a/libc/string/ia64/memccpy.S
+++ b/libc/string/ia64/memccpy.S
@@ -23,7 +23,7 @@
Inputs:
in0: dest
in1: src
- in2: char
+ in2: char
in3: byte count
This implementation assumes little endian mode (UM.be = 0).
@@ -69,75 +69,75 @@ ENTRY(memccpy)
.rotr r[MEMLAT + 7], tmp1[4], tmp2[4], val[4], tmp3[2], pos0[2]
.rotp p[MEMLAT + 6 + 1]
- mov ret0 = r0 // return NULL if no match
+ mov ret0 = r0 /* return NULL if no match */
.save pr, saved_pr
- mov saved_pr = pr // save the predicate registers
- mov dest = in0 // dest
+ mov saved_pr = pr /* save the predicate registers */
+ mov dest = in0 /* dest */
.save ar.lc, saved_lc
- mov saved_lc = ar.lc // save the loop counter
- mov saved_ec = ar.ec // save the loop counter
+ mov saved_lc = ar.lc /* save the loop counter */
+ mov saved_ec = ar.ec /* save the loop counter */
.body
- mov src = in1 // src
- extr.u char = in2, 0, 8 // char
- mov len = in3 // len
- sub tmp = r0, in0 // tmp = -dest
- cmp.ne p7, p0 = r0, r0 // clear p7
+ mov src = in1 /* src */
+ extr.u char = in2, 0, 8 /* char */
+ mov len = in3 /* len */
+ sub tmp = r0, in0 /* tmp = -dest */
+ cmp.ne p7, p0 = r0, r0 /* clear p7 */
;;
- and loopcnt = 7, tmp // loopcnt = -dest % 8
- cmp.ge p6, p0 = OP_T_THRES, len // is len <= OP_T_THRES
- mov ar.ec = 0 // ec not guaranteed zero on entry
-(p6) br.cond.spnt .cpyfew // copy byte by byte
+ and loopcnt = 7, tmp /* loopcnt = -dest % 8 */
+ cmp.ge p6, p0 = OP_T_THRES, len /* is len <= OP_T_THRES */
+ mov ar.ec = 0 /* ec not guaranteed zero on entry */
+(p6) br.cond.spnt .cpyfew /* copy byte by byte */
;;
cmp.eq p6, p0 = loopcnt, r0
mux1 charx8 = char, @brcst
(p6) br.cond.sptk .dest_aligned
- sub len = len, loopcnt // len -= -dest % 8
- adds loopcnt = -1, loopcnt // --loopcnt
+ sub len = len, loopcnt /* len -= -dest % 8 */
+ adds loopcnt = -1, loopcnt /* --loopcnt */
;;
mov ar.lc = loopcnt
-.l1: // copy -dest % 8 bytes
- ld1 value = [src], 1 // value = *src++
+.l1: /* copy -dest % 8 bytes */
+ ld1 value = [src], 1 /* value = *src++ */
;;
- st1 [dest] = value, 1 // *dest++ = value
+ st1 [dest] = value, 1 /* *dest++ = value */
cmp.eq p6, p0 = value, char
(p6) br.cond.spnt .foundit
br.cloop.dptk .l1
.dest_aligned:
- and sh1 = 7, src // sh1 = src % 8
- and tmp = -8, len // tmp = len & -OPSIZ
- and asrc = -8, src // asrc = src & -OPSIZ -- align src
- shr.u loopcnt = len, 3 // loopcnt = len / 8
- and len = 7, len ;; // len = len % 8
- shl sh1 = sh1, 3 // sh1 = 8 * (src % 8)
- adds loopcnt = -1, loopcnt // --loopcnt
- mov pr.rot = 1 << 16 ;; // set rotating predicates
- sub sh2 = 64, sh1 // sh2 = 64 - sh1
- mov ar.lc = loopcnt // set LC
- cmp.eq p6, p0 = sh1, r0 // is the src aligned?
+ and sh1 = 7, src /* sh1 = src % 8 */
+ and tmp = -8, len /* tmp = len & -OPSIZ */
+ and asrc = -8, src /* asrc = src & -OPSIZ -- align src */
+ shr.u loopcnt = len, 3 /* loopcnt = len / 8 */
+ and len = 7, len ;; /* len = len % 8 */
+ shl sh1 = sh1, 3 /* sh1 = 8 * (src % 8) */
+ adds loopcnt = -1, loopcnt /* --loopcnt */
+ mov pr.rot = 1 << 16 ;; /* set rotating predicates */
+ sub sh2 = 64, sh1 /* sh2 = 64 - sh1 */
+ mov ar.lc = loopcnt /* set LC */
+ cmp.eq p6, p0 = sh1, r0 /* is the src aligned? */
(p6) br.cond.sptk .src_aligned ;;
- add src = src, tmp // src += len & -OPSIZ
- mov ar.ec = MEMLAT + 6 + 1 // six more passes needed
- ld8 r[1] = [asrc], 8 // r[1] = w0
- cmp.ne p6, p0 = r0, r0 ;; // clear p6
+ add src = src, tmp /* src += len & -OPSIZ */
+ mov ar.ec = MEMLAT + 6 + 1 /* six more passes needed */
+ ld8 r[1] = [asrc], 8 /* r[1] = w0 */
+ cmp.ne p6, p0 = r0, r0 ;; /* clear p6 */
ALIGN(32)
.l2:
-(p[0]) ld8.s r[0] = [asrc], 8 // r[0] = w1
-(p[MEMLAT]) shr.u tmp1[0] = r[1 + MEMLAT], sh1 // tmp1 = w0 >> sh1
-(p[MEMLAT]) shl tmp2[0] = r[0 + MEMLAT], sh2 // tmp2 = w1 << sh2
+(p[0]) ld8.s r[0] = [asrc], 8 /* r[0] = w1 */
+(p[MEMLAT]) shr.u tmp1[0] = r[1 + MEMLAT], sh1 /* tmp1 = w0 >> sh1 */
+(p[MEMLAT]) shl tmp2[0] = r[0 + MEMLAT], sh2 /* tmp2 = w1 << sh2 */
(p[MEMLAT+4]) xor tmp3[0] = val[1], charx8
(p[MEMLAT+5]) czx1.r pos0[0] = tmp3[1]
-(p[MEMLAT+6]) chk.s r[6 + MEMLAT], .recovery1 // our data isn't
- // valid - rollback!
+(p[MEMLAT+6]) chk.s r[6 + MEMLAT], .recovery1 /* our data isn't */
+ /* valid - rollback! */
(p[MEMLAT+6]) cmp.ne p6, p0 = 8, pos0[1]
(p6) br.cond.spnt .gotit
-(p[MEMLAT+6]) st8 [dest] = val[3], 8 // store val to dest
-(p[MEMLAT+3]) or val[0] = tmp1[3], tmp2[3] // val = tmp1 | tmp2
+(p[MEMLAT+6]) st8 [dest] = val[3], 8 /* store val to dest */
+(p[MEMLAT+3]) or val[0] = tmp1[3], tmp2[3] /* val = tmp1 | tmp2 */
br.ctop.sptk .l2
br.cond.sptk .cpyfew
.src_aligned:
- cmp.ne p6, p0 = r0, r0 // clear p6
- mov ar.ec = MEMLAT + 2 + 1 ;; // set EC
+ cmp.ne p6, p0 = r0, r0 /* clear p6 */
+ mov ar.ec = MEMLAT + 2 + 1 ;; /* set EC */
.l3:
(p[0]) ld8.s r[0] = [src], 8
(p[MEMLAT]) xor tmp3[0] = r[MEMLAT], charx8
@@ -149,8 +149,8 @@ ENTRY(memccpy)
(p[MEMLAT+2]) st8 [dest] = r[MEMLAT+2], 8
br.ctop.dptk .l3
.cpyfew:
- cmp.eq p6, p0 = len, r0 // is len == 0 ?
- adds len = -1, len // --len;
+ cmp.eq p6, p0 = len, r0 /* is len == 0 ? */
+ adds len = -1, len /* --len; */
(p6) br.cond.spnt .restore_and_exit ;;
mov ar.lc = len
.l4:
@@ -163,14 +163,14 @@ ENTRY(memccpy)
.foundit:
(p6) mov ret0 = dest
.restore_and_exit:
- mov pr = saved_pr, -1 // restore the predicate registers
- mov ar.lc = saved_lc // restore the loop counter
- mov ar.ec = saved_ec ;; // restore the epilog counter
+ mov pr = saved_pr, -1 /* restore the predicate registers */
+ mov ar.lc = saved_lc /* restore the loop counter */
+ mov ar.ec = saved_ec ;; /* restore the epilog counter */
br.ret.sptk.many b0
.gotit:
.pred.rel "mutex" p6, p7
-(p6) mov value = val[3] // if coming from l2
-(p7) mov value = r[MEMLAT+2] // if coming from l3
+(p6) mov value = val[3] /* if coming from l2 */
+(p7) mov value = r[MEMLAT+2] /* if coming from l3 */
mov ar.lc = pos0[1] ;;
.l5:
extr.u tmp = value, 0, 8 ;;
diff --git a/libc/string/ia64/memchr.S b/libc/string/ia64/memchr.S
index 2bf078fe6..0246b5997 100644
--- a/libc/string/ia64/memchr.S
+++ b/libc/string/ia64/memchr.S
@@ -62,18 +62,18 @@ ENTRY(__memchr)
.rotr value[MEMLAT+1], addr[MEMLAT+3], aux[2], poschr[2]
.rotp p[MEMLAT+3]
.save ar.lc, saved_lc
- mov saved_lc = ar.lc // save the loop counter
+ mov saved_lc = ar.lc /* save the loop counter */
.save pr, saved_pr
- mov saved_pr = pr // save the predicates
+ mov saved_pr = pr /* save the predicates */
.body
mov ret0 = str
- and tmp = 7, str // tmp = str % 8
- cmp.ne p7, p0 = r0, r0 // clear p7
- extr.u chr = in1, 0, 8 // chr = (unsigned char) in1
+ and tmp = 7, str /* tmp = str % 8 */
+ cmp.ne p7, p0 = r0, r0 /* clear p7 */
+ extr.u chr = in1, 0, 8 /* chr = (unsigned char) in1 */
mov len = in2
- cmp.gtu p6, p0 = 16, in2 // use a simple loop for short
-(p6) br.cond.spnt .srchfew ;; // searches
- sub loopcnt = 8, tmp // loopcnt = 8 - tmp
+ cmp.gtu p6, p0 = 16, in2 /* use a simple loop for short */
+(p6) br.cond.spnt .srchfew ;; /* searches */
+ sub loopcnt = 8, tmp /* loopcnt = 8 - tmp */
cmp.eq p6, p0 = tmp, r0
(p6) br.cond.sptk .str_aligned;;
sub len = len, loopcnt
@@ -86,12 +86,12 @@ ENTRY(__memchr)
(p6) br.cond.spnt .foundit
br.cloop.sptk .l1 ;;
.str_aligned:
- cmp.ne p6, p0 = r0, r0 // clear p6
- shr.u loopcnt = len, 3 // loopcnt = len / 8
- and len = 7, len ;; // remaining len = len & 7
+ cmp.ne p6, p0 = r0, r0 /* clear p6 */
+ shr.u loopcnt = len, 3 /* loopcnt = len / 8 */
+ and len = 7, len ;; /* remaining len = len & 7 */
adds loopcnt = -1, loopcnt
mov ar.ec = MEMLAT + 3
- mux1 chrx8 = chr, @brcst ;; // get a word full of chr
+ mux1 chrx8 = chr, @brcst ;; /* get a word full of chr */
mov ar.lc = loopcnt
mov pr.rot = 1 << 16 ;;
.l2:
@@ -114,12 +114,12 @@ ENTRY(__memchr)
(p6) br.cond.dpnt .foundit
br.cloop.sptk .l3 ;;
.notfound:
- cmp.ne p6, p0 = r0, r0 // clear p6 (p7 was already 0 when we got here)
- mov ret0 = r0 ;; // return NULL
+ cmp.ne p6, p0 = r0, r0 /* clear p6 (p7 was already 0 when we got here) */
+ mov ret0 = r0 ;; /* return NULL */
.foundit:
.pred.rel "mutex" p6, p7
-(p6) adds ret0 = -1, ret0 // if we got here from l1 or l3
-(p7) add ret0 = addr[MEMLAT+2], poschr[1] // if we got here from l2
+(p6) adds ret0 = -1, ret0 /* if we got here from l1 or l3 */
+(p7) add ret0 = addr[MEMLAT+2], poschr[1] /* if we got here from l2 */
mov pr = saved_pr, -1
mov ar.lc = saved_lc
br.ret.sptk.many b0
diff --git a/libc/string/ia64/memcmp.S b/libc/string/ia64/memcmp.S
index 8b0c096ce..adb1a20de 100644
--- a/libc/string/ia64/memcmp.S
+++ b/libc/string/ia64/memcmp.S
@@ -28,7 +28,7 @@
In this form, it assumes little endian mode. For big endian mode, the
the two shifts in .l2 must be inverted:
- shl tmp1[0] = r[1 + MEMLAT], sh1 // tmp1 = w0 << sh1
+ shl tmp1[0] = r[1 + MEMLAT], sh1 // tmp1 = w0 << sh1
shr.u tmp2[0] = r[0 + MEMLAT], sh2 // tmp2 = w1 >> sh2
and all the mux1 instructions should be replaced by plain mov's. */
@@ -36,8 +36,8 @@
#include "sysdep.h"
#undef ret
-#define OP_T_THRES 16
-#define OPSIZ 8
+#define OP_T_THRES 16
+#define OPSIZ 8
#define MEMLAT 2
#define start r15
@@ -56,85 +56,85 @@
ENTRY(memcmp)
.prologue
- alloc r2 = ar.pfs, 3, 37, 0, 40
+ alloc r2 = ar.pfs, 3, 37, 0, 40
.rotr r[MEMLAT + 2], q[MEMLAT + 5], tmp1[4], tmp2[4], val[2]
.rotp p[MEMLAT + 4 + 1]
- mov ret0 = r0 // by default return value = 0
+ mov ret0 = r0 /* by default return value = 0 */
.save pr, saved_pr
- mov saved_pr = pr // save the predicate registers
+ mov saved_pr = pr /* save the predicate registers */
.save ar.lc, saved_lc
- mov saved_lc = ar.lc // save the loop counter
+ mov saved_lc = ar.lc /* save the loop counter */
.body
- mov dest = in0 // dest
- mov src = in1 // src
- mov len = in2 // len
- sub tmp = r0, in0 // tmp = -dest
+ mov dest = in0 /* dest */
+ mov src = in1 /* src */
+ mov len = in2 /* len */
+ sub tmp = r0, in0 /* tmp = -dest */
;;
- and loopcnt = 7, tmp // loopcnt = -dest % 8
- cmp.ge p6, p0 = OP_T_THRES, len // is len <= OP_T_THRES
-(p6) br.cond.spnt .cmpfew // compare byte by byte
+ and loopcnt = 7, tmp /* loopcnt = -dest % 8 */
+ cmp.ge p6, p0 = OP_T_THRES, len /* is len <= OP_T_THRES */
+(p6) br.cond.spnt .cmpfew /* compare byte by byte */
;;
cmp.eq p6, p0 = loopcnt, r0
(p6) br.cond.sptk .dest_aligned
- sub len = len, loopcnt // len -= -dest % 8
- adds loopcnt = -1, loopcnt // --loopcnt
+ sub len = len, loopcnt /* len -= -dest % 8 */
+ adds loopcnt = -1, loopcnt /* --loopcnt */
;;
mov ar.lc = loopcnt
-.l1: // copy -dest % 8 bytes
- ld1 value1 = [src], 1 // value = *src++
+.l1: /* copy -dest % 8 bytes */
+ ld1 value1 = [src], 1 /* value = *src++ */
ld1 value2 = [dest], 1
;;
cmp.ne p6, p0 = value1, value2
(p6) br.cond.spnt .done
br.cloop.dptk .l1
.dest_aligned:
- and sh1 = 7, src // sh1 = src % 8
- and tmp = -8, len // tmp = len & -OPSIZ
- and asrc = -8, src // asrc = src & -OPSIZ -- align src
- shr.u loopcnt = len, 3 // loopcnt = len / 8
- and len = 7, len ;; // len = len % 8
- shl sh1 = sh1, 3 // sh1 = 8 * (src % 8)
- adds loopcnt = -1, loopcnt // --loopcnt
- mov pr.rot = 1 << 16 ;; // set rotating predicates
- sub sh2 = 64, sh1 // sh2 = 64 - sh1
- mov ar.lc = loopcnt // set LC
- cmp.eq p6, p0 = sh1, r0 // is the src aligned?
+ and sh1 = 7, src /* sh1 = src % 8 */
+ and tmp = -8, len /* tmp = len & -OPSIZ */
+ and asrc = -8, src /* asrc = src & -OPSIZ -- align src */
+ shr.u loopcnt = len, 3 /* loopcnt = len / 8 */
+ and len = 7, len ;; /* len = len % 8 */
+ shl sh1 = sh1, 3 /* sh1 = 8 * (src % 8) */
+ adds loopcnt = -1, loopcnt /* --loopcnt */
+ mov pr.rot = 1 << 16 ;; /* set rotating predicates */
+ sub sh2 = 64, sh1 /* sh2 = 64 - sh1 */
+ mov ar.lc = loopcnt /* set LC */
+ cmp.eq p6, p0 = sh1, r0 /* is the src aligned? */
(p6) br.cond.sptk .src_aligned
- add src = src, tmp // src += len & -OPSIZ
- mov ar.ec = MEMLAT + 4 + 1 // four more passes needed
- ld8 r[1] = [asrc], 8 ;; // r[1] = w0
+ add src = src, tmp /* src += len & -OPSIZ */
+ mov ar.ec = MEMLAT + 4 + 1 /* four more passes needed */
+ ld8 r[1] = [asrc], 8 ;; /* r[1] = w0 */
.align 32
-// We enter this loop with p6 cleared by the above comparison
+/* We enter this loop with p6 cleared by the above comparison */
.l2:
-(p[0]) ld8 r[0] = [asrc], 8 // r[0] = w1
+(p[0]) ld8 r[0] = [asrc], 8 /* r[0] = w1 */
(p[0]) ld8 q[0] = [dest], 8
-(p[MEMLAT]) shr.u tmp1[0] = r[1 + MEMLAT], sh1 // tmp1 = w0 >> sh1
-(p[MEMLAT]) shl tmp2[0] = r[0 + MEMLAT], sh2 // tmp2 = w1 << sh2
+(p[MEMLAT]) shr.u tmp1[0] = r[1 + MEMLAT], sh1 /* tmp1 = w0 >> sh1 */
+(p[MEMLAT]) shl tmp2[0] = r[0 + MEMLAT], sh2 /* tmp2 = w1 << sh2 */
(p[MEMLAT+4]) cmp.ne p6, p0 = q[MEMLAT + 4], val[1]
-(p[MEMLAT+3]) or val[0] = tmp1[3], tmp2[3] // val = tmp1 | tmp2