author	Denis Vlasenko <vda.linux@googlemail.com>	2007-07-29 14:45:26 +0000
committer	Denis Vlasenko <vda.linux@googlemail.com>	2007-07-29 14:45:26 +0000
commit	519ca27d4a720c900ad74e8018cdd5cc08a8b3a6 (patch)
tree	15e4700c597a8932ef07a70f09ebfb848980e46e /libc/string/x86_64/memset.S
parent	2ea436fb13abd2793dc39cca24a8f90d8f3b6328 (diff)
Remove stray code alignment (.align 16 and .align 4 directives)
from i386 and x86_64.
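Illustrative only, since this file's hunks below turn out to contain comment fixes rather than alignment removals: a stray directive of the kind this commit removes elsewhere pads the instruction stream before a label that is not a hot-loop entry, e.g. (hypothetical code)

	.align 16	/* stray: emits up to 15 padding bytes before a cold label */
5:	mov	%rdi,%rax	/* hypothetical surrounding code */

Dropping such directives shrinks the text segment without hurting loop performance; the .p2align 4 directives that do guard hot loops are kept, as the hunks below show.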
Diffstat (limited to 'libc/string/x86_64/memset.S')
-rw-r--r--	libc/string/x86_64/memset.S	8
1 file changed, 4 insertions, 4 deletions
diff --git a/libc/string/x86_64/memset.S b/libc/string/x86_64/memset.S
index d72d74468..0f054ade6 100644
--- a/libc/string/x86_64/memset.S
+++ b/libc/string/x86_64/memset.S
@@ -71,7 +71,7 @@ ENTRY (memset)
jae 11f
.p2align 4
-3: /* Copy 64 bytes. */
+3: /* Fill 64 bytes. */
mov %r8,(%rcx)
mov %r8,0x8(%rcx)
mov %r8,0x10(%rcx)
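For orientation, a minimal sketch of the full fill loop this hunk belongs to, reconstructed from the context lines here and in the next hunk; the stores at offsets 0x18 through 0x38 and the pointer advance are assumptions, as the hunk shows only the first three stores:

3:	/* Fill 64 bytes: eight 8-byte stores of the pattern in %r8. */
	mov	%r8,(%rcx)
	mov	%r8,0x8(%rcx)
	mov	%r8,0x10(%rcx)
	mov	%r8,0x18(%rcx)	/* assumed: pattern continues in 8-byte steps */
	mov	%r8,0x20(%rcx)
	mov	%r8,0x28(%rcx)
	mov	%r8,0x30(%rcx)
	mov	%r8,0x38(%rcx)
	add	$0x40,%rcx	/* assumed: advance destination by 64 bytes */
	dec	%rax		/* dec/jne are visible in the next hunk's context */
	jne	3b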
@@ -84,7 +84,7 @@ ENTRY (memset)
dec %rax
jne 3b
-4: /* Copy final bytes. */
+4: /* Fill final bytes. */
and $0x3f,%edx
mov %rdx,%rax
shr $0x3,%rax
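The three instructions above split the tail; a commented restatement of the arithmetic, assuming %rdx holds the remaining byte count:

	and	$0x3f,%edx	/* rdx = remaining bytes mod 64 (0..63) */
	mov	%rdx,%rax
	shr	$0x3,%rax	/* rax = count of full 8-byte stores in the tail */
	/* Example: 29 tail bytes -> 3 qword stores, then 5 byte stores left. */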
@@ -107,7 +107,7 @@ ENTRY (memset)
jne 8b
9:
#if BZERO_P
- nop
+ nop /* huh?? */
#else
/* Load result (only if used as memset). */
mov %rdi,%rax /* start address of destination is result */
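The conditional reflects the two entry points built from this file: memset must return its first argument, while bzero returns void. A hedged reading of the epilogue, based on the context lines:

	/* memset(s, c, n) returns s: %rdi (s, left untouched while %rcx walks
	   the buffer) is copied to %rax before retq. bzero(s, n) returns
	   nothing, so the BZERO_P build loads no result; the lone nop flagged
	   "huh??" above is presumably just padding. */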
@@ -115,7 +115,7 @@ ENTRY (memset)
retq
.p2align 4
-11: /* Copy 64 bytes without polluting the cache. */
+11: /* Fill 64 bytes without polluting the cache. */
/* We could use movntdq %xmm0,(%rcx) here to further
speed up for large cases but let's not use XMM registers. */
movnti %r8,(%rcx)
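The comment above mentions an XMM alternative without showing it. As a hypothetical sketch only (this is not the file's code), a movntdq variant replacing the movnti loop at label 11 might look like this; it assumes the destination in %rcx is 16-byte aligned and that the non-temporal stores are fenced afterwards:

	movq	%r8,%xmm0		/* low 8 bytes = fill pattern */
	punpcklqdq %xmm0,%xmm0		/* broadcast pattern to all 16 bytes */
11:	movntdq	%xmm0,(%rcx)		/* 16-byte store, bypassing the cache */
	movntdq	%xmm0,0x10(%rcx)
	movntdq	%xmm0,0x20(%rcx)
	movntdq	%xmm0,0x30(%rcx)
	add	$0x40,%rcx
	dec	%rax
	jne	11b
	sfence				/* order the non-temporal stores */

Halving the store count is the speedup the comment alludes to; the cost is clobbering %xmm0, which the author explicitly chose to avoid.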