author    Bernhard Reutner-Fischer <rep.dot.nop@gmail.com>    2008-03-26 13:40:36 +0000
committer Bernhard Reutner-Fischer <rep.dot.nop@gmail.com>    2008-03-26 13:40:36 +0000
commit    efce79f09ae6daa77cd322df0d532beec3f445f5 (patch)
tree      ae936850c5671b8bea0abf0d33bf2196f7abc796 /libc/string/arm/memset.S
parent    17e961d9c708ab202760ce830f8efe73e91bb129 (diff)
Paul Brook writes:
The attached patch adds support for compiling arm uClibc as pure Thumb code. This is needed because some recent ARM cores do not implement traditional ARM mode. Specifically:

* Cortex-M1 - an extremely minimal FPGA-based core that implements only Thumb-1 (aka ARMv6-M).
* Cortex-M3 - a Thumb-2 only ARMv7-M core.

Most of uClibc already builds in Thumb mode; all that is left is a handful of assembly bits. Tested on arm-uclinuxeabi.
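To make the new THUMB1_ONLY path in the diff below easier to follow, here is a rough C equivalent of the strategy it uses: replicate the fill byte across a word, store bytes until the destination is word-aligned, store whole words, then finish the tail byte by byte. This sketch is not part of the patch, and memset_sketch is only an illustrative name.

    #include <stddef.h>
    #include <stdint.h>

    /* Rough C equivalent (not part of the patch) of the Thumb-1 memset below. */
    static void *memset_sketch(void *s, int c, size_t n)
    {
        unsigned char *p = s;
        uint32_t word = (unsigned char)c;

        if (n >= 8) {                  /* small fills go straight to the byte loop */
            word |= word << 8;         /* byte -> halfword */
            word |= word << 16;        /* halfword -> word */

            while ((uintptr_t)p & 3) { /* fill up to the first word boundary */
                *p++ = (unsigned char)c;
                n--;
            }
            while (n >= 4) {           /* fill aligned words */
                *(uint32_t *)p = word;
                p += 4;
                n -= 4;
            }
        }
        while (n--)                    /* fill the remaining bytes */
            *p++ = (unsigned char)c;

        return s;                      /* memset returns its first argument */
    }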
Diffstat (limited to 'libc/string/arm/memset.S')
-rw-r--r--    libc/string/arm/memset.S    62
1 files changed, 62 insertions, 0 deletions
diff --git a/libc/string/arm/memset.S b/libc/string/arm/memset.S
index 16bfe0dc5..66aa6039c 100644
--- a/libc/string/arm/memset.S
+++ b/libc/string/arm/memset.S
@@ -19,12 +19,52 @@
#include <features.h>
#include <sys/syscall.h>
+#include <bits/arm_asm.h>
.text
.global memset
.type memset,%function
.align 4
+#if defined(THUMB1_ONLY)
+.thumb_func
+memset:
+ mov ip, r0
+ cmp r2, #8 @ at least 8 bytes to do?
+ bcc 2f
+
+ lsl r3, r1, #8
+ orr r1, r3
+ lsl r3, r1, #16
+ orr r1, r3
+
+ mov r3, #3
+1: @ Fill up to the first word boundary
+ tst r0, r3
+ beq 1f
+ strb r1, [r0]
+ add r0, r0, #1
+ sub r2, r2, #1
+ b 1b
+1: @ Fill aligned words
+ str r1, [r0]
+ add r0, r0, #4
+ sub r2, r2, #4
+ cmp r2, #4
+ bcs 1b
+
+2: @ Fill the remaining bytes
+ cmp r2, #0
+ beq 2f
+1:
+ strb r1, [r0]
+ add r0, r0, #1
+ sub r2, r2, #1
+ bne 1b
+2:
+ mov r0, ip
+ bx lr
+#else
memset:
mov a4, a1
cmp a3, $8 @ at least 8 bytes to do?
@@ -33,8 +73,14 @@ memset:
orr a2, a2, a2, lsl $16
1:
tst a4, $3 @ aligned yet?
+#if defined(__thumb2__)
+ itt ne
+ strbne a2, [a4], $1
+ subne a3, a3, $1
+#else
strneb a2, [a4], $1
subne a3, a3, $1
+#endif
bne 1b
mov ip, a2
1:
@@ -51,16 +97,30 @@ memset:
stmia a4!, {a2, ip}
sub a3, a3, $8
cmp a3, $8 @ 8 bytes still to do?
+#if defined(__thumb2__)
+ itt ge
+ stmiage a4!, {a2, ip}
+ subge a3, a3, $8
+#else
stmgeia a4!, {a2, ip}
subge a3, a3, $8
+#endif
bge 1b
2:
movs a3, a3 @ anything left?
+ IT(t, eq)
#if defined(__USE_BX__)
bxeq lr
#else
moveq pc, lr @ nope
#endif
+#if defined (__thumb2__)
+1:
+ strb a2, [a4], #1
+ subs a3, a3, #1
+ bne 1b
+ bx lr
+#else
rsb a3, a3, $7
add pc, pc, a3, lsl $2
mov r0, r0
@@ -76,6 +136,8 @@ memset:
#else
mov pc, lr
#endif
+#endif
+#endif
.size memset,.-memset
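A minimal way to exercise both the word-fill fast path and the trailing-byte handling of either variant is to sweep memset over small offsets and lengths and compare the buffer against the expected pattern. The harness below is only an illustrative sketch under the assumption of a hosted C environment; it is not part of the patch or of uClibc's test suite.

    #include <stdio.h>
    #include <string.h>
    #include <stddef.h>

    /* Illustrative check (not from uClibc): unaligned starts, short lengths,
     * and lengths crossing the 8-byte fast-path threshold used above. */
    int main(void)
    {
        unsigned char buf[64];
        size_t off, len, i;

        for (off = 0; off < 8; off++) {
            for (len = 0; off + len <= sizeof(buf); len++) {
                memset(buf, 0xAA, sizeof(buf));              /* background */
                if (memset(buf + off, 0x5C, len) != buf + off) {
                    printf("bad return (off=%zu len=%zu)\n", off, len);
                    return 1;
                }
                for (i = 0; i < sizeof(buf); i++) {
                    unsigned char want =
                        (i >= off && i < off + len) ? 0x5C : 0xAA;
                    if (buf[i] != want) {
                        printf("mismatch off=%zu len=%zu i=%zu\n", off, len, i);
                        return 1;
                    }
                }
            }
        }
        printf("memset checks passed\n");
        return 0;
    }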