/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
        .text

        .global memcpy
        .type memcpy, %function
        .align 4

/*
 * Optimized memcpy() for ARM.
 *
 * Note that memcpy() always returns the destination pointer,
 * so we have to preserve R0.
 */

memcpy:
        /* The stack must always be 64-bit aligned to comply with the
         * ARM ABI. Since we have to save R0, we might as well save R4,
         * which we can use for better pipelining of the reads below.
         */
        .fnstart
        .save {r0, r4, lr}
        stmfd sp!, {r0, r4, lr}
        /* Make room for r5-r11 (7 words = 28 bytes), which are spilled
         * later. */
        .pad #28
        sub sp, sp, #28

        // Preload the destination because we'll align it to a cache line
        // with small writes. Also start the source "pump".
        pld [r0, #0]
        pld [r1, #0]
        pld [r1, #32]

        /* it simplifies things to take care of len < 4 early */
        cmp r2, #4
        blo copy_last_3_and_return

        /* compute the offset to align the source
         * offset = (4-(src&3))&3 = -src & 3
         */
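        /* Worked examples (just the formula above): src & 3 == 1 gives
         * offset 3, src & 3 == 2 gives offset 2, src & 3 == 3 gives
         * offset 1, and an already-aligned source gives offset 0.
         */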
        rsb r3, r1, #0
        ands r3, r3, #3
        beq src_aligned

        /* align source to 32 bits. We need to insert 2 instructions between
         * a ldr[b|h] and str[b|h] because byte and half-word instructions
         * stall 2 cycles.
         */
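        /* The "lsl #31" below is a flag trick: it moves bit 0 of the offset
         * into N and bit 1 into C, so the "mi" pair copies one byte when the
         * offset is odd and the "cs" pairs copy two more when bit 1 is set,
         * covering offsets 1, 2 and 3 without a branch.
         */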
        movs r12, r3, lsl #31
        sub r2, r2, r3 /* we know that r3 <= r2 because r2 >= 4 */
        ldrmib r3, [r1], #1
        ldrcsb r4, [r1], #1
        ldrcsb r12,[r1], #1
        strmib r3, [r0], #1
        strcsb r4, [r0], #1
        strcsb r12,[r0], #1

src_aligned:

        /* see if src and dst are aligned together (congruent) */
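        /* Only the low two bits matter: (dst ^ src) & 3 == 0 means both
         * pointers have the same alignment modulo 4, e.g. addresses ending
         * in 0x5 and 0x9 are congruent, 0x5 and 0x6 are not.
         */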
        eor r12, r0, r1
        tst r12, #3
        bne non_congruent

        /* Use post-increment mode for stm to spill r5-r11 to the reserved
         * stack frame. Don't update sp.
         */
        stmea sp, {r5-r11}

        /* align the destination to a cache-line */
        rsb r3, r0, #0
        ands r3, r3, #0x1C
        beq congruent_aligned32
        cmp r3, r2
        andhi r3, r2, #0x1C

        /* conditionally copies 0 to 7 words (length in r3) */
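        /* Another flag trick: "lsl #28" moves bit 4 of r3 into C and bit 3
         * into N, so the "cs" ldm/stm pair moves 16 bytes and the "mi" pair
         * moves 8; the remaining 4-byte case gets an explicit tst.
         */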
        movs r12, r3, lsl #28
        ldmcsia r1!, {r4, r5, r6, r7} /* 16 bytes */
        ldmmiia r1!, {r8, r9} /* 8 bytes */
        stmcsia r0!, {r4, r5, r6, r7}
        stmmiia r0!, {r8, r9}
        tst r3, #0x4
        ldrne r10,[r1], #4 /* 4 bytes */
        strne r10,[r0], #4
        sub r2, r2, r3

congruent_aligned32:
        /*
         * here the destination is aligned to a cache line (32 bytes)
         */

cached_aligned32:
        subs r2, r2, #32
        blo less_than_32_left

        /*
         * We preload a cache line up to 64 bytes ahead. On the ARM926, this
         * stalls only until the requested word is fetched; the linefill
         * continues in the background.
         * While the linefill is going on, we write our previous cache line
         * into the write buffer (which should have some free space).
         * When the linefill is done, the write buffer starts draining its
         * content into memory.
         *
         * While all this is going on, we load a full cache line into
         * 8 registers; this cache line should be in the cache by now
         * (or at least partly in the cache).
         *
         * This code should work well regardless of the source/dest alignment.
         */

        // Align the preload register to a cache line because the CPU does
        // "critical word first" (the first word requested is loaded first).
        bic r12, r1, #0x1F
        add r12, r12, #64
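        // r12 starts 64 bytes (two cache lines) ahead of the source; the
        // ldrhi in the loop below advances it one 32-byte line per
        // iteration, keeping the cheap preload in lockstep with r1.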

1:      ldmia r1!, { r4-r11 }
        pld [r12, #64]
        subs r2, r2, #32

        // NOTE: if r12 gets more than 64 bytes ahead of r1, the following
        // ldrhi (the cheap ARM9 preload) is no longer safely guarded by the
        // preceding subs. When it is safely guarded, the only way to get a
        // SIGSEGV here is for the caller to overstate the length.
        ldrhi r3, [r12], #32 /* cheap ARM9 preload */
        stmia r0!, { r4-r11 }
        bhs 1b

        add r2, r2, #32

less_than_32_left:
        /*
         * less than 32 bytes left at this point (length in r2)
         */

        /* skip all this if there is nothing to do, which should
         * be a common case (when not skipped, the code below takes
         * about 16 cycles)
         */
        tst r2, #0x1F
        beq 1f

        /* conditionally copies 0 to 31 bytes */
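        /* The same shift-into-flags trick, applied twice: "lsl #28" puts
         * bit 4 of r2 into C (16 bytes) and bit 3 into N (8 bytes), then
         * "lsl #30" puts bit 2 into C (4 bytes) and bit 1 into N (2 bytes);
         * a final tst handles the odd byte.
         */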
        movs r12, r2, lsl #28
        ldmcsia r1!, {r4, r5, r6, r7} /* 16 bytes */
        ldmmiia r1!, {r8, r9} /* 8 bytes */
        stmcsia r0!, {r4, r5, r6, r7}
        stmmiia r0!, {r8, r9}
        movs r12, r2, lsl #30
        ldrcs r3, [r1], #4 /* 4 bytes */
        ldrmih r4, [r1], #2 /* 2 bytes */
        strcs r3, [r0], #4
        strmih r4, [r0], #2
        tst r2, #0x1
        ldrneb r3, [r1] /* last byte */
        strneb r3, [r0]

        /* we're done! restore everything and return */
1:      ldmfd sp!, {r5-r11}
        ldmfd sp!, {r0, r4, lr}
        bx lr

        /********************************************************************/

non_congruent:
        /*
         * here source is aligned to 4 bytes
         * but destination is not.
         *
         * in the code below r2 is the number of bytes read
         * (the number of bytes written is always smaller, because we have
         * partial words in the shift queue)
         */
        cmp r2, #4
        blo copy_last_3_and_return

        /* Use post-increment mode for stm to spill r5-r11 to the reserved
         * stack frame. Don't update sp.
         */
        stmea sp, {r5-r11}

        /* compute shifts needed to align src to dest */
        rsb r5, r0, #0
        and r5, r5, #3 /* r5 = # bytes in partial word */
        mov r12, r5, lsl #3 /* r12 = right-shift count */
        rsb lr, r12, #32 /* lr = left-shift count */
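        /* For example, a destination that is off by one byte gives r5 == 1,
         * so r12 == 8 and lr == 24: each word stored below combines the
         * three residual bytes held in r3 with the low byte of the next
         * source word (r3 | next << 24), and the residue becomes next >> 8.
         */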

        /* read the first word */
        ldr r3, [r1], #4
        sub r2, r2, #4

        /* write a partial word (0 to 3 bytes), such that destination
         * becomes aligned to 32 bits (r5 = number of bytes to copy
         * for alignment)
         */
        movs r5, r5, lsl #31
        strmib r3, [r0], #1
        movmi r3, r3, lsr #8
        strcsb r3, [r0], #1
        movcs r3, r3, lsr #8
        strcsb r3, [r0], #1
        movcs r3, r3, lsr #8

        cmp r2, #4
        blo partial_word_tail

        /* Align destination to 32 bytes (cache line boundary) */
1:      tst r0, #0x1c
        beq 2f
        ldr r5, [r1], #4
        sub r2, r2, #4
        orr r4, r3, r5, lsl lr
        mov r3, r5, lsr r12
        str r4, [r0], #4
        cmp r2, #4
        bhs 1b
        blo partial_word_tail

        /* copy 32 bytes at a time */
2:      subs r2, r2, #32
        blo less_than_thirtytwo

        /* Use immediate mode for the shifts, because there is an extra cycle
         * for register shifts, which could account for up to a 50%
         * performance hit.
         */

        cmp r12, #24
        beq loop24
        cmp r12, #8
        beq loop8
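        /* r12 == 16 falls through to loop16. All three loops share the same
         * software-pipelined shape: one word is fetched ahead into r12, the
         * next seven arrive via ldmia, adjacent words are merged with
         * orr/lsl + mov/lsr, and eight output words leave via one stmia.
         */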

loop16:
        ldr r12, [r1], #4
1:      mov r4, r12
        ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
        pld [r1, #64]
        subs r2, r2, #32
        ldrhs r12, [r1], #4
        orr r3, r3, r4, lsl #16
        mov r4, r4, lsr #16
        orr r4, r4, r5, lsl #16
        mov r5, r5, lsr #16
        orr r5, r5, r6, lsl #16
        mov r6, r6, lsr #16
        orr r6, r6, r7, lsl #16
        mov r7, r7, lsr #16
        orr r7, r7, r8, lsl #16
        mov r8, r8, lsr #16
        orr r8, r8, r9, lsl #16
        mov r9, r9, lsr #16
        orr r9, r9, r10, lsl #16
        mov r10, r10, lsr #16
        orr r10, r10, r11, lsl #16
        stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
        mov r3, r11, lsr #16
        bhs 1b
        b less_than_thirtytwo

loop8:
        ldr r12, [r1], #4
1:      mov r4, r12
        ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
        pld [r1, #64]
        subs r2, r2, #32
        ldrhs r12, [r1], #4
        orr r3, r3, r4, lsl #24
        mov r4, r4, lsr #8
        orr r4, r4, r5, lsl #24
        mov r5, r5, lsr #8
        orr r5, r5, r6, lsl #24
        mov r6, r6, lsr #8
        orr r6, r6, r7, lsl #24
        mov r7, r7, lsr #8
        orr r7, r7, r8, lsl #24
        mov r8, r8, lsr #8
        orr r8, r8, r9, lsl #24
        mov r9, r9, lsr #8
        orr r9, r9, r10, lsl #24
        mov r10, r10, lsr #8
        orr r10, r10, r11, lsl #24
        stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
        mov r3, r11, lsr #8
        bhs 1b
        b less_than_thirtytwo

loop24:
        ldr r12, [r1], #4
1:      mov r4, r12
        ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
        pld [r1, #64]
        subs r2, r2, #32
        ldrhs r12, [r1], #4
        orr r3, r3, r4, lsl #8
        mov r4, r4, lsr #24
        orr r4, r4, r5, lsl #8
        mov r5, r5, lsr #24
        orr r5, r5, r6, lsl #8
        mov r6, r6, lsr #24
        orr r6, r6, r7, lsl #8
        mov r7, r7, lsr #24
        orr r7, r7, r8, lsl #8
        mov r8, r8, lsr #24
        orr r8, r8, r9, lsl #8
        mov r9, r9, lsr #24
        orr r9, r9, r10, lsl #8
        mov r10, r10, lsr #24
        orr r10, r10, r11, lsl #8
        stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
        mov r3, r11, lsr #24
        bhs 1b

less_than_thirtytwo:
        /* copy the last 0 to 31 bytes of the source */
        rsb r12, lr, #32 /* we corrupted r12 using it as the look-ahead
                            word in the loops above; recompute it */
        add r2, r2, #32
        cmp r2, #4
        blo partial_word_tail

1:      ldr r5, [r1], #4
        sub r2, r2, #4
        orr r4, r3, r5, lsl lr
        mov r3, r5, lsr r12
        str r4, [r0], #4
        cmp r2, #4
        bhs 1b

partial_word_tail:
        /* we have a partial word in the input buffer */
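        /* The number of bytes held in r3 is lr/8 (1, 2 or 3). Shifting lr
         * left by 28 puts bit 3 of lr into N and bit 4 into C, i.e. bits 0
         * and 1 of that byte count: lr == 8 writes one byte, lr == 16 two,
         * lr == 24 all three.
         */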
        movs r5, lr, lsl #(31-3)
        strmib r3, [r0], #1
        movmi r3, r3, lsr #8
        strcsb r3, [r0], #1
        movcs r3, r3, lsr #8
        strcsb r3, [r0], #1

        /* Refill spilled registers from the stack. Don't update sp. */
        ldmfd sp, {r5-r11}

copy_last_3_and_return:
        movs r2, r2, lsl #31 /* copy remaining 0, 1, 2 or 3 bytes */
        ldrmib r2, [r1], #1
        ldrcsb r3, [r1], #1
        ldrcsb r12,[r1]
        strmib r2, [r0], #1
        strcsb r3, [r0], #1
        strcsb r12,[r0]

        /* we're done! restore sp and spilled registers and return */
        add sp, sp, #28
        ldmfd sp!, {r0, r4, lr}
        bx lr
        .fnend