author | Bernhard Reutner-Fischer <rep.dot.nop@gmail.com> | 2008-10-03 13:59:52 +0000
committer | Bernhard Reutner-Fischer <rep.dot.nop@gmail.com> | 2008-10-03 13:59:52 +0000
commit | 2ba017a2d5af01cc3ef0dc554252a521e8d7c4f8 (patch)
tree | 0e0db7e3fbb4fbe1be3c56ad6c80bb7d63effb93
parent | 94bbeb72728193288f2bf071cf0e40293499045b (diff)
- use c89-style comments
Closes issue #5194
61 files changed, 1062 insertions, 1060 deletions
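The change is mechanical: C99/C++-style // comments become C89 /* */ comments, commented-out statements keep their text inside a block comment, and documentation blocks that themselves contain // sample lines (see strtok_r.c below) are wrapped in #if 0 ... #endif, since C89 comments do not nest. A minimal sketch of those three patterns follows; the file name and identifiers (sketch.c, count, cleanup, do_key_cleanup) are made up for illustration and are not from the uClibc tree.

/* sketch.c -- illustrative only, not part of this commit; it compiles with
   gcc -std=c89 -pedantic -c sketch.c and shows the three rewrite patterns. */

int count;			/* 1. a plain "// count" line comment becomes this */

void cleanup(void)
{
	/* do_key_cleanup(); */	/* 2. commented-out code keeps its text in a block comment */
}

#if 0
   3. A documentation block that itself contained // sample lines cannot be
      converted in place, because C89 block comments do not nest; the whole
      passage is disabled with #if 0 instead (see strtok_r.c below):
	x = strtok_r(s, "-", &sp);	/* x = "abc", sp = "=-def" */
#endif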
diff --git a/include/fcntl.h b/include/fcntl.h
index adeabaebb..3e0aab53e 100644
--- a/include/fcntl.h
+++ b/include/fcntl.h
@@ -199,7 +199,7 @@ extern int posix_fadvise64 (int __fd, __off64_t __offset, __off64_t __len,
 #endif
 
-#if 0 // && defined __UCLIBC_HAS_ADVANCED_REALTIME__
+#if 0 /* && defined __UCLIBC_HAS_ADVANCED_REALTIME__ */
 /* FIXME -- uClibc should probably implement these... */
diff --git a/include/libc-symbols.h b/include/libc-symbols.h
index e5e3356fb..dacc03cf9 100644
--- a/include/libc-symbols.h
+++ b/include/libc-symbols.h
@@ -283,7 +283,7 @@
 /* Tacking on "\n#APP\n\t#" to the section name makes gcc put it's bogus
    section attributes on what looks like a comment to the assembler. */
-#ifdef __sparc__ //HAVE_SECTION_QUOTES
+#ifdef __sparc__ /* HAVE_SECTION_QUOTES */
 # define __sec_comment "\"\n#APP\n\t#\""
 #else
 # define __sec_comment "\n#APP\n\t#"
diff --git a/ldso/ldso/arm/dl-startup.h b/ldso/ldso/arm/dl-startup.h
index 43985d002..05741636d 100644
--- a/ldso/ldso/arm/dl-startup.h
+++ b/ldso/ldso/arm/dl-startup.h
@@ -159,7 +159,7 @@ void PERFORM_BOOTSTRAP_RELOC(ELF_RELOC *rpnt, unsigned long *reloc_addr,
 		if (topbits != 0xfe000000 && topbits != 0x00000000) {
 #if 0
-			// Don't bother with this during ldso initilization...
+			/* Don't bother with this during ldso initilization... */
 			newvalue = fix_bad_pc24(reloc_addr, symbol_addr) - (unsigned long)reloc_addr + (addend << 2);
 			topbits = newvalue & 0xfe000000;
diff --git a/ldso/ldso/powerpc/resolve.S b/ldso/ldso/powerpc/resolve.S
index 03c6a79b8..c83337ccd 100644
--- a/ldso/ldso/powerpc/resolve.S
+++ b/ldso/ldso/powerpc/resolve.S
@@ -11,19 +11,19 @@
 	.type	_dl_linux_resolve,@function
 _dl_linux_resolve:
-// We need to save the registers used to pass parameters, and register 0,
-// which is used by _mcount; the registers are saved in a stack frame.
+/* We need to save the registers used to pass parameters, and register 0,
+   which is used by _mcount; the registers are saved in a stack frame. */
 	stwu 1,-64(1)
 	stw 0,12(1)
 	stw 3,16(1)
 	stw 4,20(1)
-// The code that calls this has put parameters for 'fixup' in r12 and r11.
+/* The code that calls this has put parameters for 'fixup' in r12 and r11. */
 	mr 3,12
 	stw 5,24(1)
 	mr 4,11
 	stw 6,28(1)
 	mflr 0
-// We also need to save some of the condition register fields.
+/* We also need to save some of the condition register fields. */
 	stw 7,32(1)
 	stw 0,48(1)
 	stw 8,36(1)
@@ -32,9 +32,9 @@ _dl_linux_resolve:
 	stw 10,44(1)
 	stw 0,8(1)
 	bl _dl_linux_resolver@local
-// 'fixup' returns the address we want to branch to.
+/* 'fixup' returns the address we want to branch to. */
 	mtctr 3
-// Put the registers back...
+/* Put the registers back... */
 	lwz 0,48(1)
 	lwz 10,44(1)
 	lwz 9,40(1)
@@ -48,7 +48,7 @@ _dl_linux_resolve:
 	lwz 4,20(1)
 	lwz 3,16(1)
 	lwz 0,12(1)
-// ...unwind the stack frame, and jump to the PLT entry we updated.
+/* ...unwind the stack frame, and jump to the PLT entry we updated. */
 	addi 1,1,64
 	bctr
diff --git a/libc/inet/rpc/rpc_thread.c b/libc/inet/rpc/rpc_thread.c
index 2c7b8c1ea..43d2fc547 100644
--- a/libc/inet/rpc/rpc_thread.c
+++ b/libc/inet/rpc/rpc_thread.c
@@ -34,7 +34,7 @@ __rpc_thread_destroy (void)
 	if (tvp != NULL && tvp != &__libc_tsd_RPC_VARS_mem) {
 		__rpc_thread_svc_cleanup ();
 		__rpc_thread_clnt_cleanup ();
-		//__rpc_thread_key_cleanup ();
+		/*__rpc_thread_key_cleanup (); */
 		free (tvp->authnone_private_s);
 		free (tvp->clnt_perr_buf_s);
 		free (tvp->clntraw_private_s);
diff --git a/libc/misc/internals/tempname.c b/libc/misc/internals/tempname.c
index 0883259bd..c963eae1f 100644
--- a/libc/misc/internals/tempname.c
+++ b/libc/misc/internals/tempname.c
@@ -75,7 +75,7 @@ static int direxists (const char *dir)
 int attribute_hidden ___path_search (char *tmpl, size_t tmpl_len, const char *dir,
 	const char *pfx /*, int try_tmpdir*/)
 {
-    //const char *d;
+    /*const char *d; */
     size_t dlen, plen;
 
     if (!pfx || !pfx[0])
diff --git a/libc/string/bfin/memchr.S b/libc/string/bfin/memchr.S
index 88e46bef6..26d419f7c 100644
--- a/libc/string/bfin/memchr.S
+++ b/libc/string/bfin/memchr.S
@@ -25,8 +25,8 @@
 .weak _memchr
 ENTRY(_memchr)
-	P0 = R0;	// P0 = address
-	P2 = R2;	// P2 = count
+	P0 = R0;	/* P0 = address */
+	P2 = R2;	/* P2 = count */
 	R1 = R1.B(Z);
 	CC = R2 == 0;
 	IF CC JUMP .Lfailed;
diff --git a/libc/string/bfin/strcmp.S b/libc/string/bfin/strcmp.S
index 12e8c53c6..ef23aa9ab 100644
--- a/libc/string/bfin/strcmp.S
+++ b/libc/string/bfin/strcmp.S
@@ -29,66 +29,66 @@ ENTRY(_strcmp)
 	p1 = r0;
 	p2 = r1;
-	p0 = -1;	// (need for loop counter init)
+	p0 = -1;	/* (need for loop counter init) */
 
-	// check if byte aligned
-	r0 = r0 | r1;	// check both pointers at same time
-	r0 <<= 30;	// dump all but last 2 bits
-	cc = az;	// are they zero?
-	if !cc jump .Lunaligned;	// no; use unaligned code.
-	// fall-thru for aligned case..
+	/* check if byte aligned */
+	r0 = r0 | r1;	/* check both pointers at same time */
+	r0 <<= 30;	/* dump all but last 2 bits */
+	cc = az;	/* are they zero? */
+	if !cc jump .Lunaligned;	/* no; use unaligned code. */
+	/* fall-thru for aligned case.. */
 
-	// note that r0 is zero from the previous...
-	// p0 set to -1
+	/* note that r0 is zero from the previous... */
+	/* p0 set to -1 */
 	LSETUP (.Lbeginloop, .Lendloop) lc0=p0;
-	// pick up first words
+	/* pick up first words */
 	r1 = [p1++];
 	r2 = [p2++];
-	// make up mask: 0FF0FF
+	/* make up mask: 0FF0FF */
 	r7 = 0xFF;
 	r7.h = 0xFF;
-	// loop : 9 cycles to check 4 characters
+	/* loop : 9 cycles to check 4 characters */
 	cc = r1 == r2;
 .Lbeginloop:
-	if !cc jump .Lnotequal4;	// compare failure, exit loop
+	if !cc jump .Lnotequal4;	/* compare failure, exit loop */
 
-	// starting with 44332211
-	// see if char 3 or char 1 is 0
-	r3 = r1 & r7;	// form 00330011
-	// add to zero, and (r2 is free, reload)
+	/* starting with 44332211 */
+	/* see if char 3 or char 1 is 0 */
+	r3 = r1 & r7;	/* form 00330011 */
+	/* add to zero, and (r2 is free, reload) */
 	r6 = r3 +|+ r0 || r2 = [p2++] || nop;
-	cc = az;	// true if either is zero
-	r3 = r1 ^ r3;	// form 44002200 (4321^0301 => 4020)
-	// (trick, saves having another mask)
-	// add to zero, and (r1 is free, reload)
+	cc = az;	/* true if either is zero */
+	r3 = r1 ^ r3;	/* form 44002200 (4321^0301 => 4020) */
+	/* (trick, saves having another mask) */
+	/* add to zero, and (r1 is free, reload) */
 	r6 = r3 +|+ r0 || r1 = [p1++] || nop;
-	cc |= az;	// true if either is zero
-	if cc jump .Lzero4;	// leave if a zero somewhere
+	cc |= az;	/* true if either is zero */
+	if cc jump .Lzero4;	/* leave if a zero somewhere */
 .Lendloop:
 	cc = r1 == r2;
 
-	// loop exits
-.Lnotequal4:	// compare failure on 4-char compare
-	// address pointers are one word ahead;
-	// faster to use zero4 exit code
+	/* loop exits */
+.Lnotequal4:	/* compare failure on 4-char compare */
+	/* address pointers are one word ahead; */
+	/* faster to use zero4 exit code */
 	p1 += 4;
 	p2 += 4;
-.Lzero4:	// one of the bytes in word 1 is zero
-	// but we've already fetched the next word; so
-	// backup two to look at failing word again
+.Lzero4:	/* one of the bytes in word 1 is zero */
+	/* but we've already fetched the next word; so */
+	/* backup two to look at failing word again */
 	p1 += -8;
 	p2 += -8;
 
-	// here when pointers are unaligned: checks one
-	// character at a time.  Also use at the end of
-	// the word-check algorithm to figure out what happened
+	/* here when pointers are unaligned: checks one */
+	/* character at a time.  Also use at the end of */
+	/* the word-check algorithm to figure out what happened */
 .Lunaligned:
-	// R0 is non-zero from before.
-	// p0 set to -1
+	/* R0 is non-zero from before. */
+	/* p0 set to -1 */
 	r0 = 0 (Z);
 	r1 = B[p1++] (Z);
@@ -96,18 +96,18 @@ ENTRY(_strcmp)
 	LSETUP (.Lbeginloop1, .Lendloop1) lc0=p0;
 .Lbeginloop1:
-	cc = r1;	// first char must be non-zero
-	// chars must be the same
+	cc = r1;	/* first char must be non-zero */
+	/* chars must be the same */
 	r3 = r2 - r1 (NS) || r1 = B[p1++] (Z) || nop;
 	cc &= az;
-	r3 = r0 - r2;	// second char must be non-zero
+	r3 = r0 - r2;	/* second char must be non-zero */
 	cc &= an;
 	if !cc jump .Lexitloop1;
 .Lendloop1:
 	r2 = B[p2++] (Z);
 
-.Lexitloop1:	// here means we found a zero or a difference.
-	// we have r2(N), p2(N), r1(N+1), p1(N+2)
+.Lexitloop1:	/* here means we found a zero or a difference. */
+	/* we have r2(N), p2(N), r1(N+1), p1(N+2) */
 	r1=B[p1+ -2] (Z);
 	r0 = r1 - r2;
 	(r7:4) = [sp++];
diff --git a/libc/string/generic/strtok_r.c b/libc/string/generic/strtok_r.c
index d082d226e..7648212f7 100644
--- a/libc/string/generic/strtok_r.c
+++ b/libc/string/generic/strtok_r.c
@@ -29,17 +29,17 @@
 # define __rawmemchr strchr /* Experimentally off - libc_hidden_proto(strchr) */
 #endif
-
-/* Parse S into tokens separated by characters in DELIM.
+#if 0
+   Parse S into tokens separated by characters in DELIM.
    If S is NULL, the saved pointer in SAVE_PTR is used as
    the next starting point.  For example:
 	char s[] = "-abc-=-def";
 	char *sp;
-	x = strtok_r(s, "-", &sp);	// x = "abc", sp = "=-def"
-	x = strtok_r(NULL, "-=", &sp);	// x = "def", sp = NULL
-	x = strtok_r(NULL, "=", &sp);	// x = NULL
-		// s = "abc\0-def\0"
-*/
+	x = strtok_r(s, "-", &sp);	/* x = "abc", sp = "=-def" */
+	x = strtok_r(NULL, "-=", &sp);	/* x = "def", sp = NULL */
+	x = strtok_r(NULL, "=", &sp);	/* x = NULL */
+		/* s = "abc\0-def\0" */
+#endif
 char *strtok_r (char *s, const char *delim, char **save_ptr)
 {
   char *token;
diff --git a/libc/string/ia64/bzero.S b/libc/string/ia64/bzero.S
index d390838a6..1f0f8b7ac 100644
--- a/libc/string/ia64/bzero.S
+++ b/libc/string/ia64/bzero.S
@@ -47,13 +47,13 @@
 #define ptr1		r28
 #define ptr2		r27
 #define ptr3		r26
-#define ptr9 		r24
+#define ptr9		r24
 #define loopcnt		r23
 #define linecnt		r22
 #define bytecnt		r21
 
-// This routine uses only scratch predicate registers (p6 - p15)
-#define p_scr		p6	// default register for same-cycle branches
+/* This routine uses only scratch predicate registers (p6 - p15) */
+#define p_scr		p6	/* default register for same-cycle branches */
 #define p_unalgn	p9
 #define p_y		p11
 #define p_n		p12
@@ -65,7 +65,7 @@
 #define MIN1		15
 #define MIN1P1HALF	8
 #define LINE_SIZE	128
-#define LSIZE_SH	7	// shift amount
+#define LSIZE_SH	7	/* shift amount */
 #define PREF_AHEAD	8
 
 #define USE_FLP
@@ -87,49 +87,49 @@ ENTRY(bzero)
 	movi0	save_lc = ar.lc
 } { .mmi
 	.body
-	mov	ret0 = dest		// return value
+	mov	ret0 = dest		/* return value */
 	nop.m	0
 	cmp.eq	p_scr, p0 = cnt, r0
 ;; }
 { .mmi
-	and	ptr2 = -(MIN1+1), dest	// aligned address
-	and	tmp = MIN1, dest	// prepare to check for alignment
-	tbit.nz	p_y, p_n = dest, 0	// Do we have an odd address? (M_B_U)
+	and	ptr2 = -(MIN1+1), dest	/* aligned address */
+	and	tmp = MIN1, dest	/* prepare to check for alignment */
+	tbit.nz	p_y, p_n = dest, 0	/* Do we have an odd address? (M_B_U) */
 } { .mib
 	mov	ptr1 = dest
 	nop.i	0
-(p_scr)	br.ret.dpnt.many rp		// return immediately if count = 0
+(p_scr)	br.ret.dpnt.many rp		/* return immediately if count = 0 */
 ;; }
 { .mib
 	cmp.ne	p_unalgn, p0 = tmp, r0
-} { .mib	// NB: # of bytes to move is 1
-	sub	bytecnt = (MIN1+1), tmp	// higher than loopcnt
-	cmp.gt	p_scr, p0 = 16, cnt	// is it a minimalistic task?
-(p_scr)	br.cond.dptk.many .move_bytes_unaligned	// go move just a few (M_B_U)
+} { .mib	/* NB: # of bytes to move is 1 */
+	sub	bytecnt = (MIN1+1), tmp	/* higher than loopcnt */
+	cmp.gt	p_scr, p0 = 16, cnt	/* is it a minimalistic task? */
+(p_scr)	br.cond.dptk.many .move_bytes_unaligned	/* go move just a few (M_B_U) */
 ;; }
 { .mmi
-(p_unalgn) add	ptr1 = (MIN1+1), ptr2	// after alignment
-(p_unalgn) add	ptr2 = MIN1P1HALF, ptr2	// after alignment
-(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 3	// should we do a st8 ?
+(p_unalgn) add	ptr1 = (MIN1+1), ptr2	/* after alignment */
+(p_unalgn) add	ptr2 = MIN1P1HALF, ptr2	/* after alignment */
+(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 3	/* should we do a st8 ? */
 ;; }
 { .mib
 (p_y)	add	cnt = -8, cnt
-(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 2	// should we do a st4 ?
+(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 2	/* should we do a st4 ? */
 } { .mib
 (p_y)	st8	[ptr2] = r0,-4
 (p_n)	add	ptr2 = 4, ptr2
 ;; }
 { .mib
 (p_yy)	add	cnt = -4, cnt
-(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 1	// should we do a st2 ?
+(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 1	/* should we do a st2 ? */
 } { .mib
 (p_yy)	st4	[ptr2] = r0,-2
 (p_nn)	add	ptr2 = 2, ptr2
 ;; }
 { .mmi
-	mov	tmp = LINE_SIZE+1	// for compare
+	mov	tmp = LINE_SIZE+1	/* for compare */
 (p_y)	add	cnt = -2, cnt
-(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 0	// should we do a st1 ?
+(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 0	/* should we do a st1 ? */
 } { .mmi
 	nop.m	0
 (p_y)	st2	[ptr2] = r0,-1
@@ -138,44 +138,44 @@ ENTRY(bzero)
 { .mmi
 (p_yy)	st1	[ptr2] = r0
-	cmp.gt	p_scr, p0 = tmp, cnt	// is it a minimalistic task?
+	cmp.gt	p_scr, p0 = tmp, cnt	/* is it a minimalistic task? */
 } { .mbb
 (p_yy)	add	cnt = -1, cnt
-(p_scr)	br.cond.dpnt.many .fraction_of_line	// go move just a few
+(p_scr)	br.cond.dpnt.many .fraction_of_line	/* go move just a few */
 ;; }
 { .mib
-	nop.m	0
+	nop.m	0
 	shr.u	linecnt = cnt, LSIZE_SH
 	nop.b	0
 ;; }
 .align 32
-.l1b:	// ------------------//  L1B: store ahead into cache lines; fill later
+.l1b:	/* ------------------  L1B: store ahead into cache lines; fill later */
 { .mmi
-	and	tmp = -(LINE_SIZE), cnt	// compute end of range
-	mov	ptr9 = ptr1		// used for prefetching
-	and	cnt = (LINE_SIZE-1), cnt	// remainder
+	and	tmp = -(LINE_SIZE), cnt	/* compute end of range */
+	mov	ptr9 = ptr1		/* used for prefetching */
+	and	cnt = (LINE_SIZE-1), cnt	/* remainder */
 } { .mmi
-	mov	loopcnt = PREF_AHEAD-1	// default prefetch loop
-	cmp.gt	p_scr, p0 = PREF_AHEAD, linecnt	// check against actual value
+	mov	loopcnt = PREF_AHEAD-1	/* default prefetch loop */
+	cmp.gt	p_scr, p0 = PREF_AHEAD, linecnt	/* check against actual value */
 ;; }
 { .mmi
 (p_scr)	add	loopcnt = -1, linecnt
-	add	ptr2 = 16, ptr1	// start of stores (beyond prefetch stores)
-	add	ptr1 = tmp, ptr1	// first address beyond total range
+	add	ptr2 = 16, ptr1	/* start of stores (beyond prefetch stores) */
+	add	ptr1 = tmp, ptr1	/* first address beyond total range */
 ;; }
 { .mmi
-	add	tmp = -1, linecnt	// next loop count
+	add	tmp = -1, linecnt	/* next loop count */
 	movi0	ar.lc = loopcnt
 ;; }
 .pref_l1b:
 { .mib
-	stf.spill [ptr9] = f0, 128	// Do stores one cache line apart
+	stf.spill [ptr9] = f0, 128	/* Do stores one cache line apart */
 	nop.i	0
 	br.cloop.dptk.few .pref_l1b
 ;; }
 { .mmi
-	add	ptr0 = 16, ptr2		// Two stores in parallel
+	add	ptr0 = 16, ptr2		/* Two stores in parallel */
 	movi0	ar.lc = tmp
 ;; }
 .l1bx:
@@ -190,7 +190,7 @@ ENTRY(bzero)
 { .mmi
 	stf.spill [ptr2] = f0, 32
 	stf.spill [ptr0] = f0, 64
-	cmp.lt	p_scr, p0 = ptr9, ptr1	// do we need more prefetching?
+	cmp.lt	p_scr, p0 = ptr9, ptr1	/* do we need more prefetching? */
 ;; }
 { .mmb
 	stf.spill [ptr2] = f0, 32
@@ -198,14 +198,14 @@ ENTRY(bzero)
 	br.cloop.dptk.few .l1bx
 ;; }
 { .mib
-	cmp.gt	p_scr, p0 = 8, cnt	// just a few bytes left ?
+	cmp.gt	p_scr, p0 = 8, cnt	/* just a few bytes left ? */
 (p_scr)	br.cond.dpnt.many .move_bytes_from_alignment
 ;; }
 .fraction_of_line:
 { .mib
 	add	ptr2 = 16, ptr1
-	shr.u	loopcnt = cnt, 5	// loopcnt = cnt / 32
+	shr.u	loopcnt = cnt, 5	/* loopcnt = cnt / 32 */
 ;; }
 { .mib
 	cmp.eq	p_scr, p0 = loopcnt, r0
@@ -213,11 +213,11 @@
 (p_scr)	br.cond.dpnt.many .store_words
 ;; }
 { .mib
-	and	cnt = 0x1f, cnt		// compute the remaining cnt
+	and	cnt = 0x1f, cnt		/* compute the remaining cnt */
 	movi0	ar.lc = loopcnt
 ;; }
 .align 32
-.l2:	// -----------------------------//  L2A:  store 32B in 2 cycles
+.l2:	/* -----------------------------  L2A:  store 32B in 2 cycles */
 { .mmb
 	store	[ptr1] = myval, 8
 	store	[ptr2] = myval, 8
@@ -228,38 +228,38 @@
 ;; }
 .store_words:
 { .mib
-	cmp.gt	p_scr, p0 = 8, cnt	// just a few bytes left ?
-(p_scr)	br.cond.dpnt.many .move_bytes_from_alignment	// Branch
+	cmp.gt	p_scr, p0 = 8, cnt	/* just a few bytes left ? */
+(p_scr)	br.cond.dpnt.many .move_bytes_from_alignment	/* Branch */
 ;; }
 { .mmi
-	store	[ptr1] = myval, 8	// store
-	cmp.le	p_y, p_n = 16, cnt	//
-	add	cnt = -8, cnt		// subtract
+	store	[ptr1] = myval, 8	/* store */
+	cmp.le	p_y, p_n = 16, cnt	/* */
+	add	cnt = -8, cnt		/* subtract */
 ;; }
 { .mmi
-(p_y)	store	[ptr1] = myval, 8	// store
+(p_y)	store	[ptr1] = myval, 8	/* store */
 (p_y)	cmp.le.unc p_yy, p_nn = 16, cnt
-(p_y)	add	cnt = -8, cnt		// subtract
+(p_y)	add	cnt = -8, cnt		/* subtract */
 ;; }
-{ .mmi		// store
+{ .mmi		/* store */
 (p_yy)	store	[ptr1] = myval, 8
-(p_yy)	add	cnt = -8, cnt		// subtract
+(p_yy)	add	cnt = -8, cnt		/* subtract */
 ;; }
 .move_bytes_from_alignment:
 { .mib
 	cmp.eq	p_scr, p0 = cnt, r0
-	tbit.nz.unc p_y, p0 = cnt, 2	// should we terminate with a st4 ?
+	tbit.nz.unc p_y, p0 = cnt, 2	/* should we terminate with a st4 ? */
 (p_scr)	br.cond.dpnt.few .restore_and_exit
 ;; }
 { .mib
 (p_y)	st4	[ptr1] = r0,4
-	tbit.nz.unc p_yy, p0 = cnt, 1	// should we terminate with a st2 ?
+	tbit.nz.unc p_yy, p0 = cnt, 1	/* should we terminate with a st2 ? */
 ;; }
 { .mib
 (p_yy)	st2	[ptr1] = r0,2
-	tbit.nz.unc p_y, p0 = cnt, 0	// should we terminate with a st1 ?
+	tbit.nz.unc p_y, p0 = cnt, 0	/* should we terminate with a st1 ? */
 ;; }
 { .mib
@@ -281,38 +281,38 @@ ENTRY(bzero)
 (p_n)	add	ptr2 = 2, ptr1
 } { .mmi
 (p_y)	add	ptr2 = 3, ptr1
-(p_y)	st1	[ptr1] = r0, 1		// fill 1 (odd-aligned) byte
-(p_y)	add	cnt = -1, cnt		// [15, 14 (or less) left]
+(p_y)	st1	[ptr1] = r0, 1		/* fill 1 (odd-aligned) byte */
+(p_y)	add	cnt = -1, cnt		/* [15, 14 (or less) left] */
 ;; }
 { .mmi
 (p_yy)	cmp.le.unc p_y, p0 = 8, cnt
-	add	ptr3 = ptr1, cnt	// prepare last store
+	add	ptr3 = ptr1, cnt	/* prepare last store */
 	movi0	ar.lc = save_lc
 } { .mmi
-(p_yy)	st2	[ptr1] = r0,