Diffstat (limited to 'libc/string/xtensa/memcpy.S')
-rw-r--r--	libc/string/xtensa/memcpy.S	45
1 file changed, 22 insertions(+), 23 deletions(-)
diff --git a/libc/string/xtensa/memcpy.S b/libc/string/xtensa/memcpy.S
index 19f3a6818..244205611 100644
--- a/libc/string/xtensa/memcpy.S
+++ b/libc/string/xtensa/memcpy.S
@@ -13,11 +13,10 @@
    Lesser General Public License for more details.
 
    You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, write to the Free
-   Software Foundation, Inc., 51 Franklin Street - Fifth Floor,
-   Boston, MA 02110-1301, USA.  */
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
 
-#include "../../sysdeps/linux/xtensa/sysdep.h"
+#include <sysdep.h>
 #include <bits/xtensa-config.h>
 
 .macro	src_b	r, w0, w1
@@ -83,7 +82,7 @@ __memcpy_aux:
 	loopnez	a4, 2f
 #else
 	beqz	a4, 2f
-	add	a7, a3, a4	// a7 = end address for source
+	add	a7, a3, a4	/* a7 = end address for source */
 #endif
 1:	l8ui	a6, a3, 0
 	addi	a3, a3, 1
@@ -92,13 +91,13 @@ __memcpy_aux:
 #if !XCHAL_HAVE_LOOPS
 	blt	a3, a7, 1b
 #endif
-2:	retw
+2:	abi_ret
 
 
 /* Destination is unaligned.  */
 
 	.align	4
-.Ldst1mod2: // dst is only byte aligned
+.Ldst1mod2: /* dst is only byte aligned */
 
 	/* Do short copies byte-by-byte.  */
 	_bltui	a4, 7, .Lbytecopy
@@ -113,7 +112,7 @@ __memcpy_aux:
 	/* Return to main algorithm if dst is now aligned.  */
 	_bbci.l	a5, 1, .Ldstaligned
 
-.Ldst2mod4: // dst has 16-bit alignment
+.Ldst2mod4: /* dst has 16-bit alignment */
 
 	/* Do short copies byte-by-byte.  */
 	_bltui	a4, 6, .Lbytecopy
@@ -134,7 +133,7 @@
 ENTRY (memcpy)
 	/* a2 = dst, a3 = src, a4 = len */
 
-	mov	a5, a2		// copy dst so that a2 is return value
+	mov	a5, a2		/* copy dst so that a2 is return value */
 	_bbsi.l	a2, 0, .Ldst1mod2
 	_bbsi.l	a2, 1, .Ldst2mod4
 .Ldstaligned:
@@ -152,7 +151,7 @@ ENTRY (memcpy)
 #else
 	beqz	a7, 2f
 	slli	a8, a7, 4
-	add	a8, a8, a3	// a8 = end of last 16B source chunk
+	add	a8, a8, a3	/* a8 = end of last 16B source chunk */
 #endif
 1:	l32i	a6, a3, 0
 	l32i	a7, a3, 4
@@ -182,7 +181,7 @@ ENTRY (memcpy)
 3:	bbsi.l	a4, 2, 4f
 	bbsi.l	a4, 1, 5f
 	bbsi.l	a4, 0, 6f
-	retw
+	abi_ret
 
 	/* Copy 4 bytes.  */
 4:	l32i	a6, a3, 0
@@ -191,7 +190,7 @@ ENTRY (memcpy)
 	addi	a5, a5, 4
 	bbsi.l	a4, 1, 5f
 	bbsi.l	a4, 0, 6f
-	retw
+	abi_ret
 
 	/* Copy 2 bytes.  */
 5:	l16ui	a6, a3, 0
@@ -199,14 +198,14 @@ ENTRY (memcpy)
 	s16i	a6, a5, 0
 	addi	a5, a5, 2
 	bbsi.l	a4, 0, 6f
-	retw
+	abi_ret
 
 	/* Copy 1 byte.  */
 6:	l8ui	a6, a3, 0
 	s8i	a6, a5, 0
 
 .Ldone:
-	retw
+	abi_ret
 
 
 /* Destination is aligned; source is unaligned.  */
@@ -218,18 +217,18 @@ ENTRY (memcpy)
 
 	/* Copy 16 bytes per iteration for word-aligned dst and
 	   unaligned src.  */
-	ssa8	a3		// set shift amount from byte offset
+	ssa8	a3		/* set shift amount from byte offset */
 #if UNALIGNED_ADDRESSES_CHECKED
-	and	a11, a3, a8	// save unalignment offset for below
-	sub	a3, a3, a11	// align a3
+	and	a11, a3, a8	/* save unalignment offset for below */
+	sub	a3, a3, a11	/* align a3 */
 #endif
-	l32i	a6, a3, 0	// load first word
+	l32i	a6, a3, 0	/* load first word */
 #if XCHAL_HAVE_LOOPS
 	loopnez	a7, 2f
 #else
 	beqz	a7, 2f
 	slli	a10, a7, 4
-	add	a10, a10, a3	// a10 = end of last 16B source chunk
+	add	a10, a10, a3	/* a10 = end of last 16B source chunk */
 #endif
 1:	l32i	a7, a3, 4
 	l32i	a8, a3, 8
@@ -273,11 +272,11 @@ ENTRY (memcpy)
 	mov	a6, a7
 4:
 #if UNALIGNED_ADDRESSES_CHECKED
-	add	a3, a3, a11	// readjust a3 with correct misalignment
+	add	a3, a3, a11	/* readjust a3 with correct misalignment */
 #endif
 	bbsi.l	a4, 1, 5f
 	bbsi.l	a4, 0, 6f
-	retw
+	abi_ret
 
 	/* Copy 2 bytes.  */
 5:	l8ui	a6, a3, 0
@@ -287,11 +286,11 @@ ENTRY (memcpy)
 	s8i	a7, a5, 1
 	addi	a5, a5, 2
 	bbsi.l	a4, 0, 6f
-	retw
+	abi_ret
 
 	/* Copy 1 byte.  */
 6:	l8ui	a6, a3, 0
 	s8i	a6, a5, 0
-	retw
+	abi_ret
 
 libc_hidden_def (memcpy)
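Note on the retw -> abi_ret change: retw is the return instruction of the Xtensa windowed-register ABI, so hardcoding it ties the routine to that ABI. abi_ret is presumably a macro supplied by the newly included <sysdep.h> that expands to the correct return instruction for the configured ABI. A minimal sketch of such a macro, assuming GCC's predefined __XTENSA_WINDOWED_ABI__ / __XTENSA_CALL0_ABI__ macros (the actual definition in uClibc's xtensa sysdep.h may differ):

	/* Hypothetical abi_ret: pick the return opcode for the active ABI.
	   Works because .S files are run through the C preprocessor.  */
	#if defined (__XTENSA_WINDOWED_ABI__)
	# define abi_ret	retw	/* windowed ABI: rotate register window back */
	#else
	# define abi_ret	ret	/* call0 ABI: plain return through a0 */
	#endif

Likewise, switching from the relative path "../../sysdeps/linux/xtensa/sysdep.h" to <sysdep.h> leaves it to the build system's include path to locate the architecture header, rather than depending on the source file's position in the tree.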
