krait optimizations from CAF

libc: krait: Use performance version of memcpy.

Change-Id: I47d25d1da5b1a96bbc1b60f8acdaa31721a68e73

 This is the 2nd commit message:

libc: krait: Use performance version of bcopy and memmove

Change-Id: I587085aeb0c30ceccaa3f420594a194b129632b5

 This is the 3rd commit message:

libc: krait: Restore prior version of strcmp.

Change-Id: I8ddcdb4d3a905dd746985435dcdb525ab5a1c947

 This is the 4th commit message:

Bionic/libm: fast neon pow() for small x,y.

Add a fast neon version of pow() suitable for relatively small
positive x and y (both between 0 and 4).  Run the standard
implementation in all other cases.  Gives approximately a 60%
performance improvement on AnTuTu FPU scores.
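
As a minimal C sketch of the dispatch (pow_neon_small() is a
hypothetical name used here for illustration; the actual fast path is
NEON assembly):

    /* Sketch of the small-argument dispatch described above.
     * pow_neon_small() is hypothetical; __ieee754_pow() is the standard
     * fdlibm implementation that the pow() wrapper normally calls. */
    extern double pow_neon_small(double x, double y);
    extern double __ieee754_pow(double x, double y);

    double pow(double x, double y)
    {
        if (x > 0.0 && x < 4.0 && y > 0.0 && y < 4.0)
            return pow_neon_small(x, y);   /* NEON polynomial fast path */
        return __ieee754_pow(x, y);        /* full-range implementation */
    }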

Change-Id: I97e0712daeb2740764b26a44be0caaa39c481453

 This is the 5th commit message:

Bionic/libm: Prefer branches and VFP ABI.

For internal functions, set the gcc "aapcs-vfp" attribute for ARM
and use -fno-if-conversion to prefer branches over predicated
instructions (this improves performance on architectures with good
branch prediction).
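
The attribute in question is GCC's ARM "pcs" function attribute; a
sketch of how it can be attached (the LIBM_VFP_ABI macro name is
illustrative, not necessarily what the tree uses):

    /* Illustrative sketch.  Only meaningful under the softfp float ABI;
     * a hard-float build already passes FP arguments in VFP registers
     * and predefines __ARM_PCS_VFP. */
    #if defined(__arm__) && !defined(__ARM_PCS_VFP)
    #define LIBM_VFP_ABI __attribute__((pcs("aapcs-vfp")))
    #else
    #define LIBM_VFP_ABI
    #endif

    double __kernel_sin(double x, double y, int iy) LIBM_VFP_ABI;
    double __kernel_cos(double x, double y) LIBM_VFP_ABI;

The -fno-if-conversion half of the change is simply a flag added to
the libm compile flags.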

Change-Id: I8424e0e82a19d35e7e3b6e3e122dcdecdd5426fd

 This is the 6th commit message:

Bionic/libm: add assembly versions of sin/cos.

Add assembly versions of sin/cos with the remainder-pi/2 calculation
integrated.  They were extracted directly from a binary libm.so built
with the __ieee754_rem_pio2 calls inlined.

Change-Id: Ia093f420e58e794635e3a5f09e8236ae7601f1f6

 This is the 7th commit message:

Bionic/libm: Pow optimizations and bug fixes.

Use the VFP calling convention for the pow_neon handoff function by
default.  Fix a register-usage collision between two different
polynomial coefficients in pow_neon.  Remove conditional execution in
pow_neon and replace it with branching.

Change-Id: I76095f4a006e2fb01a53943b66fd69bfa1fd3033

 This is the 8th commit message:

Bionic/libm: Add precision-correct de-serialize sin/cos.

Modify sin/cos to improve performance while retaining bit-for-bit
accuracy with the existing algorithm.

Change-Id: Iaba2dd731cd015732744705dad8bddb713b43067

 This is the 9th commit message:

libm: Optimistic pow calling.

Call the optimized pow optimistically and revert to the full-range
implementation if an out-of-range input is detected.
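
A control-flow sketch of the optimistic pattern (the NaN-sentinel
signalling below is only an assumption made for illustration; the
actual detection mechanism may differ):

    /* Sketch: assume pow_neon() returns NaN to signal that it declined
     * an out-of-range input; the real signalling may be different. */
    extern double pow_neon(double x, double y);
    extern double __ieee754_pow(double x, double y);

    double pow(double x, double y)
    {
        double r = pow_neon(x, y);              /* optimistic fast path */
        if (__builtin_isnan(r) && !__builtin_isnan(x) && !__builtin_isnan(y))
            r = __ieee754_pow(x, y);            /* revert to full range */
        return r;
    }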

Change-Id: I6f3aa734adbf99484b7ff70736ef83a41e5815b8

 This is the 10th commit message:

libm: sqrt and sqrtf via ARM vsqrt instruction

Optimized sqrt and sqrtf for ARM by using the hardware sqrt
opcode rather than the slow, generic portable code.
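
Roughly the equivalent in C with inline assembly (a sketch assuming a
VFP-capable build; errno handling is not shown):

    /* Sketch only: map sqrt/sqrtf onto the VFP vsqrt instructions.
     * "w"/"t" are GCC's VFP double/single register constraints; %P
     * prints the double-precision register name. */
    double sqrt(double x)
    {
        double r;
        __asm__("vsqrt.f64 %P0, %P1" : "=w"(r) : "w"(x));
        return r;
    }

    float sqrtf(float x)
    {
        float r;
        __asm__("vsqrt.f32 %0, %1" : "=t"(r) : "t"(x));
        return r;
    }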

Change-Id: I84694159577aef6418710548085d8149c45e0e3f

 This is the 11th commit message:

Bionic/libm: Remove extra vmov from sin/cos.

Keep the integer representation of the bits of x on the integer side
rather than moving it to and from the FP registers.

Change-Id: I1895db385c9616cdae9ab6403f392dfbae292adc

 This is the 12th commit message:

Bionic/libm: Enable optimized math functions on more QCOM targets.

Allow more QCOM targets to use the optimized math functions.

Change-Id: I76ee1bf951ae1c8397fef3af6e9937ed8cad9b62

 This is the 13th commit message:

libc: krait: Perform all memmoves as word-wise at minimum

Certain transfers of very small sizes, or transfers that end by
copying a small remnant, were moving memory a byte at a time.  Perform
these using word operations whenever 2 or more bytes remain.
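
In C terms the tail handling becomes roughly the following (a sketch
of the idea only; the actual change is in the krait memmove assembly):

    #include <stddef.h>
    #include <string.h>

    /* Sketch: copy a remaining tail with word/halfword accesses instead
     * of a byte-at-a-time loop; the fixed-size memcpy() calls compile to
     * single load/store instructions.  Overlap/direction handling of the
     * surrounding memmove is omitted. */
    static void copy_tail(unsigned char *d, const unsigned char *s, size_t n)
    {
        while (n >= 4) { memcpy(d, s, 4); d += 4; s += 4; n -= 4; }
        if (n >= 2)    { memcpy(d, s, 2); d += 2; s += 2; n -= 2; }
        if (n)         *d = *s;
    }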

Change-Id: Ie0aef8927f7bd452509913d7368e5c71b7be4720

 This is the 14th commit message:

Revert "libc: krait: Perform all memmoves as word-wise at minimum"
This reverts commit 1fae6403c2c37f21a740c54b8f5451777b6affe1.

Change-Id: Icb80126042c17dc234ce7a6282846ca344db4ee1

 This is the 15th commit message:

libm: krait: Fix build warning for e_pow.o

e_pow.o was given more than once in the same rule.  Remove the extra
arm/e_pow.S reference in libm/Android.mk.
diff --git a/libc/Android.mk b/libc/Android.mk
index 22ed2d8..6ef5af9 100644
--- a/libc/Android.mk
+++ b/libc/Android.mk
@@ -366,9 +366,9 @@
 # =========================================================
 ifeq ($(TARGET_ARCH),arm)
 libc_common_src_files += \
-	bionic/memmove.c.arm \
-	string/bcopy.c \
 	string/strncmp.c \
+#	bionic/memmove.c.arm \
+#	string/bcopy.c \
 
 # These files need to be arm so that gdbserver
 # can set breakpoints in them without messing
diff --git a/libc/arch-arm/generic/generic.mk b/libc/arch-arm/generic/generic.mk
index 358b1e6..6dfeb98 100644
--- a/libc/arch-arm/generic/generic.mk
+++ b/libc/arch-arm/generic/generic.mk
@@ -1,3 +1,5 @@
 $(call libc-add-cpu-variant-src,MEMCPY,arch-arm/generic/bionic/memcpy.S)
 $(call libc-add-cpu-variant-src,MEMSET,arch-arm/generic/bionic/memset.S)
 $(call libc-add-cpu-variant-src,STRCMP,arch-arm/generic/bionic/strcmp.S)
+$(call libc-add-cpu-variant-src,MEMMOVE,bionic/memmove.c.arm)
+$(call libc-add-cpu-variant-src,BCOPY,string/bcopy.c.arm)
diff --git a/libc/arch-arm/krait/bionic/memcpy.S b/libc/arch-arm/krait/bionic/memcpy.S
index 0cd4d44..818c3a4 100644
--- a/libc/arch-arm/krait/bionic/memcpy.S
+++ b/libc/arch-arm/krait/bionic/memcpy.S
@@ -26,121 +26,185 @@
  * SUCH DAMAGE.
  */
 
-/* Assumes neon instructions and a cache line size of 32 bytes. */
+/* Assumes neon instructions and a cache line size of 64 bytes. */
 
 #include <machine/cpu-features.h>
 #include <machine/asm.h>
 
 /*
- * This code assumes it is running on a processor that supports all arm v7
- * instructions, that supports neon instructions, and that has a 32 byte
- * cache line.
+ * These can be overridden in:
+ *   device/<vendor>/<board>/BoardConfig.mk
+ * by setting the following:
+ *   TARGET_USE_KRAIT_BIONIC_OPTIMIZATION := true
+ *   TARGET_USE_KRAIT_PLD_SET := true
+ *   TARGET_KRAIT_BIONIC_PLDOFFS := <pldoffset>
+ *   TARGET_KRAIT_BIONIC_PLDSIZE := <pldsize>
+ *   TARGET_KRAIT_BIONIC_PLDTHRESH := <pldthreshold>
+ *   TARGET_KRAIT_BIONIC_BBTHRESH := <bbthreshold>
  */
 
-        .text
-        .fpu    neon
+#ifndef PLDOFFS
+#define PLDOFFS	(10)
+#endif
+#ifndef PLDTHRESH
+#define PLDTHRESH (PLDOFFS)
+#endif
+#ifndef BBTHRESH
+#define BBTHRESH (4096/64)
+#endif
+#if (PLDOFFS < 1)
+#error Routine does not support offsets less than 1
+#endif
+#if (PLDTHRESH < PLDOFFS)
+#error PLD threshold must be greater than or equal to the PLD offset
+#endif
+#ifndef PLDSIZE
+#define PLDSIZE	(64)
+#endif
+#define NOP_OPCODE       (0xe320f000)
 
-#define CACHE_LINE_SIZE     32
+	.text
+	.fpu    neon
 
 ENTRY(memcpy)
-        .save       {r0, lr}
-        /* start preloading as early as possible */
-        pld         [r1, #(CACHE_LINE_SIZE*0)]
-        stmfd       sp!, {r0, lr}
-        pld         [r1, #(CACHE_LINE_SIZE*2)]
+	.save	{r0, lr}
+	mov	r12, r0
+	cmp	r2, #4
+	blt	.Lneon_lt4
+	cmp	r2, #16
+	blt	.Lneon_lt16
+	cmp	r2, #32
+	blt	.Lneon_16
+	cmp	r2, #64
+	blt	.Lneon_copy_32_a
+	stmfd	sp!, {r0}
 
-        /* do we have at least 16-bytes to copy (needed for alignment below) */
-        cmp         r2, #16
-        blo         5f
+	mov	r12, r2, lsr #6
+	cmp	r12, #PLDTHRESH
+	ble	.Lneon_copy_64_loop_nopld
 
-        /* align destination to cache-line for the write-buffer */
-        rsb         r3, r0, #0
-        ands        r3, r3, #0xF
-        beq         0f
+	stmfd	sp!, {r9, r10, lr}
 
-        /* copy up to 15-bytes (count in r3) */
-        sub         r2, r2, r3
-        movs        ip, r3, lsl #31
-        ldrmib      lr, [r1], #1
-        strmib      lr, [r0], #1
-        ldrcsb      ip, [r1], #1
-        ldrcsb      lr, [r1], #1
-        strcsb      ip, [r0], #1
-        strcsb      lr, [r0], #1
-        movs        ip, r3, lsl #29
-        bge         1f
-        // copies 4 bytes, destination 32-bits aligned
-        vld4.8      {d0[0], d1[0], d2[0], d3[0]}, [r1]!
-        vst4.8      {d0[0], d1[0], d2[0], d3[0]}, [r0, :32]!
-1:      bcc         2f
-        // copies 8 bytes, destination 64-bits aligned
-        vld1.8      {d0}, [r1]!
-        vst1.8      {d0}, [r0, :64]!
-2:
+	cmp	r12, #BBTHRESH
+	ble	.Lneon_prime_pump
 
-0:      /* preload immediately the next cache line, which we may need */
-        pld         [r1, #(CACHE_LINE_SIZE*0)]
-        pld         [r1, #(CACHE_LINE_SIZE*2)]
+	add	lr, r0, #0x400
+	add	r9, r1, #(PLDOFFS*PLDSIZE)
+	sub	lr, lr, r9
+	lsl	lr, lr, #21
+	lsr	lr, lr, #21
+	add	lr, lr, #(PLDOFFS*PLDSIZE)
+	cmp	r12, lr, lsr #6
+	movle	lr, #(PLDOFFS*PLDSIZE)
+	ble	.Lneon_prime_pump
 
-        /* make sure we have at least 64 bytes to copy */
-        subs        r2, r2, #64
-        blo         2f
+	movgt	r9, #(PLDOFFS)
+	rsbgts	r9, r9, lr, lsr #6
+	ble	.Lneon_prime_pump
 
-        /* Preload all the cache lines we need.
-         * NOTE: The number of pld below depends on CACHE_LINE_SIZE,
-         * ideally we would increase the distance in the main loop to
-         * avoid the goofy code below. In practice this doesn't seem to make
-         * a big difference.
-         * NOTE: The value CACHE_LINE_SIZE * 8 was chosen through
-         * experimentation.
-         */
-        pld         [r1, #(CACHE_LINE_SIZE*4)]
-        pld         [r1, #(CACHE_LINE_SIZE*6)]
-        pld         [r1, #(CACHE_LINE_SIZE*8)]
+	add	r10, r1, lr
+	bic	r10, #0x3F
 
-1:      /* The main loop copies 64 bytes at a time */
-        vld1.8      {d0  - d3},   [r1]!
-        vld1.8      {d4  - d7},   [r1]!
-        pld         [r1, #(CACHE_LINE_SIZE*8)]
-        subs        r2, r2, #64
-        vst1.8      {d0  - d3},   [r0, :128]!
-        vst1.8      {d4  - d7},   [r0, :128]!
-        bhs         1b
+	sub	r12, lr, lsr #6
+	cmp	r9, r12
+	suble	r12, r12, r9
+	movgt	r9, r12
+	movgt	r12, #0
 
-2:      /* fix-up the remaining count and make sure we have >= 32 bytes left */
-        add         r2, r2, #64
-        subs        r2, r2, #32
-        blo         4f
+	pld	[r1, #((PLDOFFS-1)*PLDSIZE)]
+	.balignl 64, NOP_OPCODE, 4*2
+.Lneon_copy_64_loop_outer_doublepld:
+	pld	[r1, #((PLDOFFS)*PLDSIZE)]
+	vld1.32	{q0, q1}, [r1]!
+	vld1.32	{q2, q3}, [r1]!
+	ldr	r3, [r10]
+	subs	r9, r9, #1
+	vst1.32	{q0, q1}, [r0]!
+	vst1.32	{q2, q3}, [r0]!
+	add	r10, #64
+	bne	.Lneon_copy_64_loop_outer_doublepld
+	cmp	r12, #0
+	beq	.Lneon_pop_before_nopld
 
-3:      /* 32 bytes at a time. These cache lines were already preloaded */
-        vld1.8      {d0 - d3},  [r1]!
-        subs        r2, r2, #32
-        vst1.8      {d0 - d3},  [r0, :128]!
-        bhs         3b
-4:      /* less than 32 left */
-        add         r2, r2, #32
-        tst         r2, #0x10
-        beq         5f
-        // copies 16 bytes, 128-bits aligned
-        vld1.8      {d0, d1}, [r1]!
-        vst1.8      {d0, d1}, [r0, :128]!
+	cmp	r12, #(512*1024/64)
+	blt	.Lneon_copy_64_loop_outer
 
-5:      /* copy up to 15-bytes (count in r2) */
-        movs        ip, r2, lsl #29
-        bcc         1f
-        vld1.8      {d0}, [r1]!
-        vst1.8      {d0}, [r0]!
-1:      bge         2f
-        vld4.8      {d0[0], d1[0], d2[0], d3[0]}, [r1]!
-        vst4.8      {d0[0], d1[0], d2[0], d3[0]}, [r0]!
-2:      movs        ip, r2, lsl #31
-        ldrmib      r3, [r1], #1
-        ldrcsb      ip, [r1], #1
-        ldrcsb      lr, [r1], #1
-        strmib      r3, [r0], #1
-        strcsb      ip, [r0], #1
-        strcsb      lr, [r0], #1
+	.balignl 64, NOP_OPCODE, 8
+.Lneon_copy_64_loop_ddr:
+	vld1.32	{q0, q1}, [r1]!
+	vld1.32	{q2, q3}, [r1]!
+	pld	[r10]
+	subs	r12, r12, #1
+	vst1.32	{q0, q1}, [r0]!
+	vst1.32	{q2, q3}, [r0]!
+	add	r10, #64
+	bne	.Lneon_copy_64_loop_ddr
+	b	.Lneon_pop_before_nopld
 
-        ldmfd       sp!, {r0, lr}
-        bx          lr
+	.balignl 64, NOP_OPCODE, 4*2
+.Lneon_prime_pump:
+	mov	lr, #(PLDOFFS*PLDSIZE)
+	add	r10, r1, #(PLDOFFS*PLDSIZE)
+	bic	r10, #0x3F
+	sub	r12, r12, #PLDOFFS
+	ldr	r3, [r10, #(-1*PLDSIZE)]
+	.balignl 64, NOP_OPCODE, 4*2
+.Lneon_copy_64_loop_outer:
+	vld1.32	{q0, q1}, [r1]!
+	vld1.32	{q2, q3}, [r1]!
+	ldr	r3, [r10]
+	subs	r12, r12, #1
+	vst1.32	{q0, q1}, [r0]!
+	vst1.32	{q2, q3}, [r0]!
+	add	r10, #64
+	bne	.Lneon_copy_64_loop_outer
+	.balignl 64, NOP_OPCODE, 4*2
+.Lneon_pop_before_nopld:
+	mov	r12, lr, lsr #6
+	ldmfd	sp!, {r9, r10, lr}
+	.balignl 64, NOP_OPCODE, 4*2
+.Lneon_copy_64_loop_nopld:
+	vld1.32	{q8, q9}, [r1]!
+	vld1.32	{q10, q11}, [r1]!
+	subs	r12, r12, #1
+	vst1.32	{q8, q9}, [r0]!
+	vst1.32	{q10, q11}, [r0]!
+	bne	.Lneon_copy_64_loop_nopld
+	ands	r2, r2, #0x3f
+	ldmfd	sp!, {r12}
+	beq	.Lneon_exit
+	.balignl 64, NOP_OPCODE, 4*2
+.Lneon_copy_32_a:
+	movs	r3, r2, lsl #27
+	bcc	.Lneon_16
+	vld1.32	{q0,q1}, [r1]!
+	vst1.32	{q0,q1}, [r0]!
+	.balignl 64, NOP_OPCODE, 4*2
+.Lneon_16:
+	bpl	.Lneon_lt16
+	vld1.32	{q8}, [r1]!
+	vst1.32	{q8}, [r0]!
+	ands	r2, r2, #0x0f
+	beq	.Lneon_exit
+	.balignl 64, NOP_OPCODE, 4*2
+.Lneon_lt16:
+	movs	r3, r2, lsl #29
+	ldrcs	r3, [r1], #4
+	strcs	r3, [r0], #4
+	ldrcs	r3, [r1], #4
+	strcs	r3, [r0], #4
+	ldrmi	r3, [r1], #4
+	strmi	r3, [r0], #4
+	.balignl 64, NOP_OPCODE, 4*2
+.Lneon_lt4:
+	movs	r2, r2, lsl #31
+	ldrcsh	r3, [r1], #2
+	strcsh	r3, [r0], #2
+	ldrmib	r3, [r1]
+	strmib	r3, [r0]
+	.balignl 64, NOP_OPCODE, 4*2
+.Lneon_exit:
+	mov	r0, r12
+	bx	lr
 END(memcpy)
+
diff --git a/libc/arch-arm/krait/bionic/memmove.S b/libc/arch-arm/krait/bionic/memmove.S
new file mode 100644
index 0000000..349c8e3
--- /dev/null
+++ b/libc/arch-arm/krait/bionic/memmove.S
@@ -0,0 +1,209 @@
+/***************************************************************************
+ Copyright (c) 2009-2013 The Linux Foundation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+     * Redistributions of source code must retain the above copyright
+       notice, this list of conditions and the following disclaimer.
+     * Redistributions in binary form must reproduce the above copyright
+       notice, this list of conditions and the following disclaimer in the
+       documentation and/or other materials provided with the distribution.
+     * Neither the name of The Linux Foundation nor the names of its contributors may
+       be used to endorse or promote products derived from this software
+       without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+  ***************************************************************************/
+
+/***************************************************************************
+ *  Neon memmove: Attempts to do a memmove with Neon registers if possible,
+ *     Inputs:
+ *        dest: The destination buffer
+ *        src: The source buffer
+ *        n: The size of the buffer to transfer
+ *     Outputs:
+ *
+ ***************************************************************************/
+
+#include <machine/cpu-features.h>
+
+/*
+ * These can be overridden in:
+ *   device/<vendor>/<board>/BoardConfig.mk
+ * by setting the following:
+ *   TARGET_USE_KRAIT_BIONIC_OPTIMIZATION := true
+ *   TARGET_USE_KRAIT_PLD_SET := true
+ *   TARGET_KRAIT_BIONIC_PLDOFFS := <pldoffset>
+ *   TARGET_KRAIT_BIONIC_PLDSIZE := <pldsize>
+ *   TARGET_KRAIT_BIONIC_PLDTHRESH := <pldthreshold>
+ */
+#ifndef PLDOFFS
+#define PLDOFFS	(10)
+#endif
+#ifndef PLDTHRESH
+#define PLDTHRESH (PLDOFFS)
+#endif
+#if (PLDOFFS < 5)
+#error Routine does not support offsets less than 5
+#endif
+#if (PLDTHRESH < PLDOFFS)
+#error PLD threshold must be greater than or equal to the PLD offset
+#endif
+#ifndef PLDSIZE
+#define PLDSIZE (64)
+#endif
+#define NOP_OPCODE (0xe320f000)
+
+	.code 32
+	.align 5
+	.global memmove
+	.type memmove, %function
+
+	.global _memmove_words
+	.type _memmove_words, %function
+
+	.global bcopy
+	.type bcopy, %function
+
+bcopy:
+	mov	r12, r0
+	mov	r0, r1
+	mov	r1, r12
+	.balignl 64, NOP_OPCODE, 4*2
+memmove:
+_memmove_words:
+.Lneon_memmove_cmf:
+	subs	r12, r0, r1
+	bxeq	lr
+	cmphi	r2, r12
+	bls	memcpy	/* Use memcpy for non-overlapping areas */
+
+	push	{r0}
+
+.Lneon_back_to_front_copy:
+	add	r0, r0, r2
+	add	r1, r1, r2
+	cmp	r2, #4
+	bgt	.Lneon_b2f_gt4
+	cmp	r2, #0
+.Lneon_b2f_smallcopy_loop:
+	beq	.Lneon_memmove_done
+	ldrb	r12, [r1, #-1]!
+	subs	r2, r2, #1
+	strb	r12, [r0, #-1]!
+	b	.Lneon_b2f_smallcopy_loop
+.Lneon_b2f_gt4:
+	sub	r3, r0, r1
+	cmp	r2, r3
+	movle	r12, r2
+	movgt	r12, r3
+	cmp	r12, #64
+	bge	.Lneon_b2f_copy_64
+	cmp	r12, #32
+	bge	.Lneon_b2f_copy_32
+	cmp	r12, #8
+	bge	.Lneon_b2f_copy_8
+	cmp	r12, #4
+	bge	.Lneon_b2f_copy_4
+	b	.Lneon_b2f_copy_1
+.Lneon_b2f_copy_64:
+	sub	r1, r1, #64	/* Predecrement */
+	sub	r0, r0, #64
+	movs	r12, r2, lsr #6
+	cmp	r12, #PLDTHRESH
+	ble	.Lneon_b2f_copy_64_loop_nopld
+	sub	r12, #PLDOFFS
+	pld	[r1, #-(PLDOFFS-5)*PLDSIZE]
+	pld	[r1, #-(PLDOFFS-4)*PLDSIZE]
+	pld	[r1, #-(PLDOFFS-3)*PLDSIZE]
+	pld	[r1, #-(PLDOFFS-2)*PLDSIZE]
+	pld	[r1, #-(PLDOFFS-1)*PLDSIZE]
+	.balignl 64, NOP_OPCODE, 4*2
+.Lneon_b2f_copy_64_loop_outer:
+	pld	[r1, #-(PLDOFFS)*PLDSIZE]
+	vld1.32	{q0, q1}, [r1]!
+	vld1.32	{q2, q3}, [r1]
+	subs	r12, r12, #1
+	vst1.32	{q0, q1}, [r0]!
+	sub	r1, r1, #96	/* Post-fixup and predecrement */
+	vst1.32	{q2, q3}, [r0]
+	sub	r0, r0, #96
+	bne	.Lneon_b2f_copy_64_loop_outer
+	mov	r12, #PLDOFFS
+	.balignl 64, NOP_OPCODE, 4*2
+.Lneon_b2f_copy_64_loop_nopld:
+	vld1.32	{q8, q9}, [r1]!
+	vld1.32	{q10, q11}, [r1]
+	subs	r12, r12, #1
+	vst1.32	{q8, q9}, [r0]!
+	sub	r1, r1, #96	/* Post-fixup and predecrement */
+	vst1.32	{q10, q11}, [r0]
+	sub	r0, r0, #96
+	bne	.Lneon_b2f_copy_64_loop_nopld
+	ands	r2, r2, #0x3f
+	beq	.Lneon_memmove_done
+	add	r1, r1, #64	/* Post-fixup */
+	add	r0, r0, #64
+	cmp	r2, #32
+	blt	.Lneon_b2f_copy_finish
+.Lneon_b2f_copy_32:
+	mov	r12, r2, lsr #5
+.Lneon_b2f_copy_32_loop:
+	sub	r1, r1, #32	/* Predecrement */
+	sub	r0, r0, #32
+	vld1.32	{q0,q1}, [r1]
+	subs	r12, r12, #1
+	vst1.32	{q0,q1}, [r0]
+	bne	.Lneon_b2f_copy_32_loop
+	ands	r2, r2, #0x1f
+	beq	.Lneon_memmove_done
+.Lneon_b2f_copy_finish:
+.Lneon_b2f_copy_8:
+	movs	r12, r2, lsr #0x3
+	beq	.Lneon_b2f_copy_4
+	.balignl 64, NOP_OPCODE, 4*2
+.Lneon_b2f_copy_8_loop:
+	sub	r1, r1, #8	/* Predecrement */
+	sub	r0, r0, #8
+	vld1.32	{d0}, [r1]
+	subs	r12, r12, #1
+	vst1.32	{d0}, [r0]
+	bne	.Lneon_b2f_copy_8_loop
+	ands	r2, r2, #0x7
+	beq	.Lneon_memmove_done
+.Lneon_b2f_copy_4:
+	movs	r12, r2, lsr #0x2
+	beq	.Lneon_b2f_copy_1
+.Lneon_b2f_copy_4_loop:
+	ldr	r3, [r1, #-4]!
+	subs	r12, r12, #1
+	str	r3, [r0, #-4]!
+	bne	.Lneon_b2f_copy_4_loop
+	ands	r2, r2, #0x3
+.Lneon_b2f_copy_1:
+	cmp	r2, #0
+	beq	.Lneon_memmove_done
+	.balignl 64, NOP_OPCODE, 4*2
+.Lneon_b2f_copy_1_loop:
+	ldrb	r12, [r1, #-1]!
+	subs	r2, r2, #1
+	strb	r12, [r0, #-1]!
+	bne	.Lneon_b2f_copy_1_loop
+
+.Lneon_memmove_done:
+	pop	{r0}
+	bx	lr
+
+	.end
+
+
diff --git a/libc/arch-arm/krait/bionic/strcmp.S b/libc/arch-arm/krait/bionic/strcmp.S
index d614b9d..764a531 100644
--- a/libc/arch-arm/krait/bionic/strcmp.S
+++ b/libc/arch-arm/krait/bionic/strcmp.S
@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2013 ARM Ltd
+ * Copyright (c) 2011 The Android Open Source Project
+ * Copyright (c) 2008 ARM Ltd
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -29,358 +30,110 @@
 #include <machine/cpu-features.h>
 #include <machine/asm.h>
 
+	.text
+
 #ifdef __ARMEB__
-#define S2LOMEM lsl
-#define S2LOMEMEQ lsleq
-#define S2HIMEM lsr
+#define SHFT2LSB lsl
+#define SHFT2LSBEQ lsleq
+#define SHFT2MSB lsr
+#define SHFT2MSBEQ lsreq
 #define MSB 0x000000ff
 #define LSB 0xff000000
-#define BYTE0_OFFSET 24
-#define BYTE1_OFFSET 16
-#define BYTE2_OFFSET 8
-#define BYTE3_OFFSET 0
-#else /* not  __ARMEB__ */
-#define S2LOMEM lsr
-#define S2LOMEMEQ lsreq
-#define S2HIMEM lsl
-#define BYTE0_OFFSET 0
-#define BYTE1_OFFSET 8
-#define BYTE2_OFFSET 16
-#define BYTE3_OFFSET 24
+#else
+#define SHFT2LSB lsr
+#define SHFT2LSBEQ lsreq
+#define SHFT2MSB lsl
+#define SHFT2MSBEQ lsleq
 #define MSB 0xff000000
 #define LSB 0x000000ff
-#endif /* not  __ARMEB__ */
-
-.syntax         unified
-
-#if defined (__thumb__)
-        .thumb
-        .thumb_func
 #endif
 
+#define magic1(REG) REG
+#define magic2(REG) REG, lsl #7
+
 ENTRY(strcmp)
-      /* Use LDRD whenever possible.  */
+	PLD(r0, #0)
+	PLD(r1, #0)
+	eor	r2, r0, r1
+	tst	r2, #3
 
-/* The main thing to look out for when comparing large blocks is that
-   the loads do not cross a page boundary when loading past the index
-   of the byte with the first difference or the first string-terminator.
+	/* Strings not at same byte offset from a word boundary.  */
+	bne	.Lstrcmp_unaligned
+	ands	r2, r0, #3
+	bic	r0, r0, #3
+	bic	r1, r1, #3
+	ldr	ip, [r0], #4
+	it	eq
+	ldreq	r3, [r1], #4
+	beq	1f
 
-   For example, if the strings are identical and the string-terminator
-   is at index k, byte by byte comparison will not load beyond address
-   s1+k and s2+k; word by word comparison may load up to 3 bytes beyond
-   k; double word - up to 7 bytes.  If the load of these bytes crosses
-   a page boundary, it might cause a memory fault (if the page is not mapped)
-   that would not have happened in byte by byte comparison.
+	/* Although s1 and s2 have identical initial alignment, they are
+	 * not currently word aligned.  Rather than comparing bytes,
+	 * make sure that any bytes fetched from before the addressed
+	 * bytes are forced to 0xff.  Then they will always compare
+	 * equal.
+	 */
+	eor	r2, r2, #3
+	lsl	r2, r2, #3
+	mvn	r3, #MSB
+	SHFT2LSB	r2, r3, r2
+	ldr	r3, [r1], #4
+	orr	ip, ip, r2
+	orr	r3, r3, r2
+1:
+	/* Load the 'magic' constant 0x01010101. */
+	str	r4, [sp, #-4]!
+	mov	r4, #1
+	orr	r4, r4, r4, lsl #8
+	orr	r4, r4, r4, lsl #16
+	.p2align	2
+4:
+	PLD(r0, #8)
+	PLD(r1, #8)
+	sub	r2, ip, magic1(r4)
+	cmp	ip, r3
+	itttt	eq
 
-   If an address is (double) word aligned, then a load of a (double) word
-   from that address will not cross a page boundary.
-   Therefore, the algorithm below considers word and double-word alignment
-   of strings separately.  */
-
-/* High-level description of the algorithm.
-
-   * The fast path: if both strings are double-word aligned,
-     use LDRD to load two words from each string in every loop iteration.
-   * If the strings have the same offset from a word boundary,
-     use LDRB to load and compare byte by byte until
-     the first string is aligned to a word boundary (at most 3 bytes).
-     This is optimized for quick return on short unaligned strings.
-   * If the strings have the same offset from a double-word boundary,
-     use LDRD to load two words from each string in every loop iteration, as in the fast path.
-   * If the strings do not have the same offset from a double-word boundary,
-     load a word from the second string before the loop to initialize the queue.
-     Use LDRD to load two words from every string in every loop iteration.
-     Inside the loop, load the second word from the second string only after comparing
-     the first word, using the queued value, to guarantee safety across page boundaries.
-   * If the strings do not have the same offset from a word boundary,
-     use LDR and a shift queue. Order of loads and comparisons matters,
-     similarly to the previous case.
-
-   * Use UADD8 and SEL to compare words, and use REV and CLZ to compute the return value.
-   * The only difference between ARM and Thumb modes is the use of CBZ instruction.
-   * The only difference between big and little endian is the use of REV in little endian
-     to compute the return value, instead of MOV.
-*/
-
-        .macro m_cbz reg label
-#ifdef __thumb2__
-        cbz     \reg, \label
-#else   /* not defined __thumb2__ */
-        cmp     \reg, #0
-        beq     \label
-#endif /* not defined __thumb2__ */
-        .endm /* m_cbz */
-
-        .macro m_cbnz reg label
-#ifdef __thumb2__
-        cbnz    \reg, \label
-#else   /* not defined __thumb2__ */
-        cmp     \reg, #0
-        bne     \label
-#endif /* not defined __thumb2__ */
-        .endm /* m_cbnz */
-
-        .macro  init
-        /* Macro to save temporary registers and prepare magic values.  */
-        subs    sp, sp, #16
-        strd    r4, r5, [sp, #8]
-        strd    r6, r7, [sp]
-        mvn     r6, #0  /* all F */
-        mov     r7, #0  /* all 0 */
-        .endm   /* init */
-
-        .macro  magic_compare_and_branch w1 w2 label
-        /* Macro to compare registers w1 and w2 and conditionally branch to label.  */
-        cmp     \w1, \w2        /* Are w1 and w2 the same?  */
-        magic_find_zero_bytes \w1
-        it      eq
-        cmpeq   ip, #0          /* Is there a zero byte in w1?  */
-        bne     \label
-        .endm /* magic_compare_and_branch */
-
-        .macro  magic_find_zero_bytes w1
-        /* Macro to find all-zero bytes in w1, result is in ip.  */
-#if (defined (__ARM_FEATURE_DSP))
-        uadd8   ip, \w1, r6
-        sel     ip, r7, r6
-#else /* not defined (__ARM_FEATURE_DSP) */
-        /* __ARM_FEATURE_DSP is not defined for some Cortex-M processors.
-        Coincidently, these processors only have Thumb-2 mode, where we can use the
-        the (large) magic constant available directly as an immediate in instructions.
-        Note that we cannot use the magic constant in ARM mode, where we need
-        to create the constant in a register.  */
-        sub     ip, \w1, #0x01010101
-        bic     ip, ip, \w1
-        and     ip, ip, #0x80808080
-#endif /* not defined (__ARM_FEATURE_DSP) */
-        .endm /* magic_find_zero_bytes */
-
-        .macro  setup_return w1 w2
-#ifdef __ARMEB__
-        mov     r1, \w1
-        mov     r2, \w2
-#else /* not  __ARMEB__ */
-        rev     r1, \w1
-        rev     r2, \w2
-#endif /* not  __ARMEB__ */
-        .endm /* setup_return */
-
-        pld [r0, #0]
-        pld [r1, #0]
-
-        /* Are both strings double-word aligned?  */
-        orr     ip, r0, r1
-        tst     ip, #7
-        bne     do_align
-
-        /* Fast path.  */
-        init
-
-doubleword_aligned:
-
-        /* Get here when the strings to compare are double-word aligned.  */
-        /* Compare two words in every iteration.  */
-        .p2align        2
+	/* check for any zero bytes in first word */
+	biceq	r2, r2, ip
+	tsteq	r2, magic2(r4)
+	ldreq	ip, [r0], #4
+	ldreq	r3, [r1], #4
+	beq	4b
 2:
-        pld [r0, #16]
-        pld [r1, #16]
-
-        /* Load the next double-word from each string.  */
-        ldrd    r2, r3, [r0], #8
-        ldrd    r4, r5, [r1], #8
-
-        magic_compare_and_branch w1=r2, w2=r4, label=return_24
-        magic_compare_and_branch w1=r3, w2=r5, label=return_35
-        b       2b
-
-do_align:
-        /* Is the first string word-aligned?  */
-        ands    ip, r0, #3
-        beq     word_aligned_r0
-
-        /* Fast compare byte by byte until the first string is word-aligned.  */
-        /* The offset of r0 from a word boundary is in ip. Thus, the number of bytes
-        to read until the next word boundary is 4-ip.  */
-        bic     r0, r0, #3
-        ldr     r2, [r0], #4
-        lsls    ip, ip, #31
-        beq     byte2
-        bcs     byte3
-
-byte1:
-        ldrb    ip, [r1], #1
-        uxtb    r3, r2, ror #BYTE1_OFFSET
-        subs    ip, r3, ip
-        bne     fast_return
-        m_cbz   reg=r3, label=fast_return
-
-byte2:
-        ldrb    ip, [r1], #1
-        uxtb    r3, r2, ror #BYTE2_OFFSET
-        subs    ip, r3, ip
-        bne     fast_return
-        m_cbz   reg=r3, label=fast_return
-
-byte3:
-        ldrb    ip, [r1], #1
-        uxtb    r3, r2, ror #BYTE3_OFFSET
-        subs    ip, r3, ip
-        bne     fast_return
-        m_cbnz  reg=r3, label=word_aligned_r0
-
-fast_return:
-        mov     r0, ip
-        bx      lr
-
-word_aligned_r0:
-        init
-        /* The first string is word-aligned.  */
-        /* Is the second string word-aligned?  */
-        ands    ip, r1, #3
-        bne     strcmp_unaligned
-
-word_aligned:
-        /* The strings are word-aligned. */
-        /* Is the first string double-word aligned?  */
-        tst     r0, #4
-        beq     doubleword_aligned_r0
-
-        /* If r0 is not double-word aligned yet, align it by loading
-        and comparing the next word from each string.  */
-        ldr     r2, [r0], #4
-        ldr     r4, [r1], #4
-        magic_compare_and_branch w1=r2 w2=r4 label=return_24
-
-doubleword_aligned_r0:
-        /* Get here when r0 is double-word aligned.  */
-        /* Is r1 doubleword_aligned?  */
-        tst     r1, #4
-        beq     doubleword_aligned
-
-        /* Get here when the strings to compare are word-aligned,
-        r0 is double-word aligned, but r1 is not double-word aligned.  */
-
-        /* Initialize the queue.  */
-        ldr     r5, [r1], #4
-
-        /* Compare two words in every iteration.  */
-        .p2align        2
-3:
-        pld [r0, #16]
-        pld [r1, #16]
-
-        /* Load the next double-word from each string and compare.  */
-        ldrd    r2, r3, [r0], #8
-        magic_compare_and_branch w1=r2 w2=r5 label=return_25
-        ldrd    r4, r5, [r1], #8
-        magic_compare_and_branch w1=r3 w2=r4 label=return_34
-        b       3b
-
-        .macro miscmp_word offsetlo offsethi
-        /* Macro to compare misaligned strings.  */
-        /* r0, r1 are word-aligned, and at least one of the strings
-        is not double-word aligned.  */
-        /* Compare one word in every loop iteration.  */
-        /* OFFSETLO is the original bit-offset of r1 from a word-boundary,
-        OFFSETHI is 32 - OFFSETLO (i.e., offset from the next word).  */
-
-        /* Initialize the shift queue.  */
-        ldr     r5, [r1], #4
-
-        /* Compare one word from each string in every loop iteration.  */
-        .p2align        2
-7:
-        ldr     r3, [r0], #4
-        S2LOMEM r5, r5, #\offsetlo
-        magic_find_zero_bytes w1=r3
-        cmp     r7, ip, S2HIMEM #\offsetlo
-        and     r2, r3, r6, S2LOMEM #\offsetlo
-        it      eq
-        cmpeq   r2, r5
-        bne     return_25
-        ldr     r5, [r1], #4
-        cmp     ip, #0
-        eor r3, r2, r3
-        S2HIMEM r2, r5, #\offsethi
-        it      eq
-        cmpeq   r3, r2
-        bne     return_32
-        b       7b
-        .endm /* miscmp_word */
-
-strcmp_unaligned:
-        /* r0 is word-aligned, r1 is at offset ip from a word.  */
-        /* Align r1 to the (previous) word-boundary.  */
-        bic     r1, r1, #3
-
-        /* Unaligned comparison word by word using LDRs. */
-        cmp     ip, #2
-        beq     miscmp_word_16                    /* If ip == 2.  */
-        bge     miscmp_word_24                    /* If ip == 3.  */
-        miscmp_word offsetlo=8 offsethi=24        /* If ip == 1.  */
-miscmp_word_24:  miscmp_word offsetlo=24 offsethi=8
-
-
-return_32:
-        setup_return w1=r3, w2=r2
-        b       do_return
-return_34:
-        setup_return w1=r3, w2=r4
-        b       do_return
-return_25:
-        setup_return w1=r2, w2=r5
-        b       do_return
-return_35:
-        setup_return w1=r3, w2=r5
-        b       do_return
-return_24:
-        setup_return w1=r2, w2=r4
-
-do_return:
-
+	/* There's a zero or a different byte in the word */
+	SHFT2MSB	r0, ip, #24
+	SHFT2LSB	ip, ip, #8
+	cmp	r0, #1
+	it	cs
+	cmpcs	r0, r3, SHFT2MSB #24
+	it	eq
+	SHFT2LSBEQ r3, r3, #8
+	beq	2b
+	/* On a big-endian machine, r0 contains the desired byte in bits
+	 * 0-7; on a little-endian machine they are in bits 24-31.  In
+	 * both cases the other bits in r0 are all zero.  For r3 the
+	 * interesting byte is at the other end of the word, but the
+	 * other bits are not necessarily zero.  We need a signed result
+	 * representing the difference in the unsigned bytes, so for the
+	 * little-endian case we can't just shift the interesting bits up.
+	 */
 #ifdef __ARMEB__
-        mov     r0, ip
-#else /* not  __ARMEB__ */
-        rev     r0, ip
-#endif /* not  __ARMEB__ */
+	sub	r0, r0, r3, lsr #24
+#else
+	and	r3, r3, #255
+	/* No RSB instruction in Thumb2 */
+#ifdef __thumb2__
+	lsr	r0, r0, #24
+	sub	r0, r0, r3
+#else
+	rsb	r0, r3, r0, lsr #24
+#endif
+#endif
+	ldr	r4, [sp], #4
+	bx	lr
 
-        /* Restore temporaries early, before computing the return value.  */
-        ldrd    r6, r7, [sp]
-        ldrd    r4, r5, [sp, #8]
-        adds    sp, sp, #16
-
-        /* There is a zero or a different byte between r1 and r2.  */
-        /* r0 contains a mask of all-zero bytes in r1.  */
-        /* Using r0 and not ip here because cbz requires low register.  */
-        m_cbz   reg=r0, label=compute_return_value
-        clz     r0, r0
-        /* r0 contains the number of bits on the left of the first all-zero byte in r1.  */
-        rsb     r0, r0, #24
-        /* Here, r0 contains the number of bits on the right of the first all-zero byte in r1.  */
-        lsr     r1, r1, r0
-        lsr     r2, r2, r0
-
-compute_return_value:
-        movs    r0, #1
-        cmp     r1, r2
-        /* The return value is computed as follows.
-        If r1>r2 then (C==1 and Z==0) and LS doesn't hold and r0 is #1 at return.
-        If r1<r2 then (C==0 and Z==0) and we execute SBC with carry_in=0,
-        which means r0:=r0-r0-1 and r0 is #-1 at return.
-        If r1=r2 then (C==1 and Z==1) and we execute SBC with carry_in=1,
-        which means r0:=r0-r0 and r0 is #0 at return.
-        (C==0 and Z==1) cannot happen because the carry bit is "not borrow".  */
-        it      ls
-        sbcls   r0, r0, r0
-        bx      lr
-
-    /* The code from the previous version of strcmp.S handles this
-     * particular case (the second string is 2 bytes off a word alignment)
-     * faster than any current version. In this very specific case, use the
-     * previous version. See bionic/libc/arch-arm/cortex-a15/bionic/strcmp.S
-     * for the unedited version of this code.
-     */
-miscmp_word_16:
+.Lstrcmp_unaligned:
 	wp1 .req r0
 	wp2 .req r1
 	b1  .req r2
@@ -389,8 +142,22 @@
 	t1  .req ip
 	@ r3 is scratch
 
-    /* At this point, wp1 (r0) has already been word-aligned. */
+	/* First of all, compare bytes until wp1(sp1) is word-aligned. */
+1:
+	tst	wp1, #3
+	beq	2f
+	ldrb	r2, [wp1], #1
+	ldrb	r3, [wp2], #1
+	cmp	r2, #1
+	it	cs
+	cmpcs	r2, r3
+	beq	1b
+	sub	r0, r2, r3
+	bx	lr
+
 2:
+	str	r5, [sp, #-4]!
+	str	r4, [sp, #-4]!
 	mov	b1, #1
 	orr	b1, b1, b1, lsl #8
 	orr	b1, b1, b1, lsl #16
@@ -399,22 +166,72 @@
 	bic	wp2, wp2, #3
 	ldr	w1, [wp1], #4
 	ldr	w2, [wp2], #4
+	cmp	t1, #2
+	beq	2f
+	bhi	3f
 
-	/* Critical inner Loop: Block with 2 bytes initial overlap */
+	/* Critical inner Loop: Block with 3 bytes initial overlap */
 	.p2align	2
-2:
-	S2HIMEM	t1, w1, #16
+1:
+	bic	t1, w1, #MSB
+	cmp	t1, w2, SHFT2LSB #8
 	sub	r3, w1, b1
-	S2LOMEM	t1, t1, #16
 	bic	r3, r3, w1
-	cmp	t1, w2, S2LOMEM #16
 	bne	4f
 	ands	r3, r3, b1, lsl #7
 	it	eq
 	ldreq	w2, [wp2], #4
 	bne	5f
 	eor	t1, t1, w1
-	cmp	t1, w2, S2HIMEM #16
+	cmp	t1, w2, SHFT2MSB #24
+	bne	6f
+	ldr	w1, [wp1], #4
+	b	1b
+4:
+	SHFT2LSB	w2, w2, #8
+	b	8f
+
+5:
+#ifdef __ARMEB__
+	/* The syndrome value may contain false ones if the string ends
+	 * with the bytes 0x01 0x00
+	 */
+	tst	w1, #0xff000000
+	itt	ne
+	tstne	w1, #0x00ff0000
+	tstne	w1, #0x0000ff00
+	beq	7f
+#else
+	bics	r3, r3, #0xff000000
+	bne	7f
+#endif
+	ldrb	w2, [wp2]
+	SHFT2LSB	t1, w1, #24
+#ifdef __ARMEB__
+	lsl	w2, w2, #24
+#endif
+	b	8f
+
+6:
+	SHFT2LSB	t1, w1, #24
+	and	w2, w2, #LSB
+	b	8f
+
+	/* Critical inner Loop: Block with 2 bytes initial overlap */
+	.p2align	2
+2:
+	SHFT2MSB	t1, w1, #16
+	sub	r3, w1, b1
+	SHFT2LSB	t1, t1, #16
+	bic	r3, r3, w1
+	cmp	t1, w2, SHFT2LSB #16
+	bne	4f
+	ands	r3, r3, b1, lsl #7
+	it	eq
+	ldreq	w2, [wp2], #4
+	bne	5f
+	eor	t1, t1, w1
+	cmp	t1, w2, SHFT2MSB #16
 	bne	6f
 	ldr	w1, [wp1], #4
 	b	2b
@@ -433,27 +250,54 @@
 	bne	7f
 #endif
 	ldrh	w2, [wp2]
-	S2LOMEM	t1, w1, #16
+	SHFT2LSB	t1, w1, #16
 #ifdef __ARMEB__
 	lsl	w2, w2, #16
 #endif
 	b	8f
 
 6:
-	S2HIMEM	w2, w2, #16
-	S2LOMEM	t1, w1, #16
+	SHFT2MSB	w2, w2, #16
+	SHFT2LSB	t1, w1, #16
 4:
-	S2LOMEM	w2, w2, #16
+	SHFT2LSB	w2, w2, #16
 	b	8f
 
+	/* Critical inner Loop: Block with 1 byte initial overlap */
+	.p2align	2
+3:
+	and	t1, w1, #LSB
+	cmp	t1, w2, SHFT2LSB #24
+	sub	r3, w1, b1
+	bic	r3, r3, w1
+	bne	4f
+	ands	r3, r3, b1, lsl #7
+	it	eq
+	ldreq	w2, [wp2], #4
+	bne	5f
+	eor	t1, t1, w1
+	cmp	t1, w2, SHFT2MSB #8
+	bne	6f
+	ldr	w1, [wp1], #4
+	b	3b
+4:
+	SHFT2LSB	w2, w2, #24
+	b	8f
+5:
+	/* The syndrome value may contain false ones if the string ends
+	 * with the bytes 0x01 0x00
+	 */
+	tst	w1, #LSB
+	beq	7f
+	ldr	w2, [wp2], #4
+6:
+	SHFT2LSB	t1, w1, #8
+	bic	w2, w2, #MSB
+	b	8f
 7:
 	mov	r0, #0
-
-    /* Restore registers and stack. */
-    ldrd    r6, r7, [sp]
-    ldrd    r4, r5, [sp, #8]
-    adds    sp, sp, #16
-
+	ldr	r4, [sp], #4
+	ldr	r5, [sp], #4
 	bx	lr
 
 8:
@@ -463,15 +307,11 @@
 	it	cs
 	cmpcs	r0, r2
 	itt	eq
-	S2LOMEMEQ	t1, t1, #8
-	S2LOMEMEQ	w2, w2, #8
+	SHFT2LSBEQ	t1, t1, #8
+	SHFT2LSBEQ	w2, w2, #8
 	beq	8b
 	sub	r0, r2, r0
-
-    /* Restore registers and stack. */
-    ldrd    r6, r7, [sp]
-    ldrd    r4, r5, [sp, #8]
-    adds    sp, sp, #16
-
+	ldr	r4, [sp], #4
+	ldr	r5, [sp], #4
 	bx	lr
 END(strcmp)
diff --git a/libc/arch-arm/krait/krait.mk b/libc/arch-arm/krait/krait.mk
index 4847f86..b4949b4 100644
--- a/libc/arch-arm/krait/krait.mk
+++ b/libc/arch-arm/krait/krait.mk
@@ -1,5 +1,6 @@
 $(call libc-add-cpu-variant-src,MEMCPY,arch-arm/krait/bionic/memcpy.S)
 $(call libc-add-cpu-variant-src,MEMSET,arch-arm/krait/bionic/memset.S)
 $(call libc-add-cpu-variant-src,STRCMP,arch-arm/krait/bionic/strcmp.S)
+$(call libc-add-cpu-variant-src,MEMMOVE,arch-arm/krait/bionic/memmove.S)
 
-include bionic/libc/arch-arm/generic/generic.mk
+#include bionic/libc/arch-arm/generic/generic.mk