/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Quick'n'dirty IP checksum ...
 *
 * Copyright (C) 1998, 1999 Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007  Maciej W. Rozycki
 */
#include <linux/errno.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>

#ifdef CONFIG_64BIT
/*
 * As we share the code base with the mips32 tree (which uses the o32 ABI
 * register definitions), we need to redefine the register definitions from
 * the n64 ABI register naming to the o32 ABI register naming.
 */
#undef t0
#undef t1
#undef t2
#undef t3
#define t0	$8
#define t1	$9
#define t2	$10
#define t3	$11
#define t4	$12
#define t5	$13
#define t6	$14
#define t7	$15

#define USE_DOUBLE
#endif

#ifdef USE_DOUBLE

#define LOAD	ld
#define LOAD32	lwu
#define ADD	daddu
#define NBYTES	8

#else

#define LOAD	lw
#define LOAD32	lw
#define ADD	addu
#define NBYTES	4

#endif /* USE_DOUBLE */

#define UNIT(unit)	((unit)*NBYTES)

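/*
 * ADDC implements a ones'-complement (end-around carry) add: "sltu"
 * yields 1 exactly when the addition wrapped, and that carry is folded
 * back into the sum.  ADDC32 is the always-32-bit variant used when
 * adding the caller's 32-bit partial checksum.
 */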
#define ADDC(sum,reg)						\
	ADD	sum, reg;					\
	sltu	v1, sum, reg;					\
	ADD	sum, v1;					\

#define ADDC32(sum,reg)						\
	addu	sum, reg;					\
	sltu	v1, sum, reg;					\
	addu	sum, v1;					\

#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)	\
	LOAD	_t0, (offset + UNIT(0))(src);			\
	LOAD	_t1, (offset + UNIT(1))(src);			\
	LOAD	_t2, (offset + UNIT(2))(src);			\
	LOAD	_t3, (offset + UNIT(3))(src);			\
	ADDC(sum, _t0);						\
	ADDC(sum, _t1);						\
	ADDC(sum, _t2);						\
	ADDC(sum, _t3)

#ifdef USE_DOUBLE
#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3)	\
	CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)
#else
#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3)	\
	CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3);	\
	CSUM_BIGCHUNK1(src, offset + 0x10, sum, _t0, _t1, _t2, _t3)
#endif

/*
 * a0: source address
 * a1: length of the area to checksum
 * a2: partial checksum
 */
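/* The running sum is kept in v0 and returned there as a 32-bit partial checksum. */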

#define src a0
#define sum v0

	.text
	.set	noreorder
	.align	5
LEAF(csum_partial)
	move	sum, zero
	move	t7, zero

	sltiu	t8, a1, 0x8
	bnez	t8, .Lsmall_csumcpy	/* < 8 bytes to copy */
	move	t2, a1

	andi	t7, src, 0x1		/* odd buffer? */

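/*
 * Bring src up through 2-, 4-, 8- and 16-byte alignment, folding the
 * odd bytes into the sum as we go, before the 128-byte unrolled loop.
 */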
.Lhword_align:
	beqz	t7, .Lword_align
	andi	t8, src, 0x2

	lbu	t0, (src)
	LONG_SUBU	a1, a1, 0x1
#ifdef __MIPSEL__
	sll	t0, t0, 8
#endif
	ADDC(sum, t0)
	PTR_ADDU	src, src, 0x1
	andi	t8, src, 0x2

.Lword_align:
	beqz	t8, .Ldword_align
	sltiu	t8, a1, 56

	lhu	t0, (src)
	LONG_SUBU	a1, a1, 0x2
	ADDC(sum, t0)
	sltiu	t8, a1, 56
	PTR_ADDU	src, src, 0x2

.Ldword_align:
	bnez	t8, .Ldo_end_words
	move	t8, a1

	andi	t8, src, 0x4
	beqz	t8, .Lqword_align
	andi	t8, src, 0x8

	LOAD32	t0, 0x00(src)
	LONG_SUBU	a1, a1, 0x4
	ADDC(sum, t0)
	PTR_ADDU	src, src, 0x4
	andi	t8, src, 0x8

.Lqword_align:
	beqz	t8, .Loword_align
	andi	t8, src, 0x10

#ifdef USE_DOUBLE
	ld	t0, 0x00(src)
	LONG_SUBU	a1, a1, 0x8
	ADDC(sum, t0)
#else
	lw	t0, 0x00(src)
	lw	t1, 0x04(src)
	LONG_SUBU	a1, a1, 0x8
	ADDC(sum, t0)
	ADDC(sum, t1)
#endif
	PTR_ADDU	src, src, 0x8
	andi	t8, src, 0x10

.Loword_align:
	beqz	t8, .Lbegin_movement
	LONG_SRL	t8, a1, 0x7

#ifdef USE_DOUBLE
	ld	t0, 0x00(src)
	ld	t1, 0x08(src)
	ADDC(sum, t0)
	ADDC(sum, t1)
#else
	CSUM_BIGCHUNK1(src, 0x00, sum, t0, t1, t3, t4)
#endif
	LONG_SUBU	a1, a1, 0x10
	PTR_ADDU	src, src, 0x10
	LONG_SRL	t8, a1, 0x7

.Lbegin_movement:
	beqz	t8, 1f
	andi	t2, a1, 0x40

.Lmove_128bytes:
	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4)
	LONG_SUBU	t8, t8, 0x01
	.set	reorder			/* DADDI_WAR */
	PTR_ADDU	src, src, 0x80
	bnez	t8, .Lmove_128bytes
	.set	noreorder

1:
	beqz	t2, 1f
	andi	t2, a1, 0x20

.Lmove_64bytes:
	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
	PTR_ADDU	src, src, 0x40

1:
	beqz	t2, .Ldo_end_words
	andi	t8, a1, 0x1c

.Lmove_32bytes:
	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
	andi	t8, a1, 0x1c
	PTR_ADDU	src, src, 0x20

.Ldo_end_words:
	beqz	t8, .Lsmall_csumcpy
	andi	t2, a1, 0x3
	LONG_SRL	t8, t8, 0x2

.Lend_words:
	LOAD32	t0, (src)
	LONG_SUBU	t8, t8, 0x1
	ADDC(sum, t0)
	.set	reorder			/* DADDI_WAR */
	PTR_ADDU	src, src, 0x4
	bnez	t8, .Lend_words
	.set	noreorder

/* unknown src alignment and < 8 bytes to go */
.Lsmall_csumcpy:
	move	a1, t2

	andi	t0, a1, 4
	beqz	t0, 1f
	andi	t0, a1, 2

	/* Still a full word to go */
	ulw	t1, (src)
	PTR_ADDIU	src, 4
#ifdef USE_DOUBLE
	/* ulw sign-extends on 64-bit; shift the word into the upper
	   half (clearing the lower 32 bits) so the fold below adds it
	   back in exactly once */
	dsll	t1, t1, 32
#endif
	ADDC(sum, t1)

1:	move	t1, zero
	beqz	t0, 1f
	andi	t0, a1, 1

	/* Still a halfword to go */
	ulhu	t1, (src)
	PTR_ADDIU	src, 2

1:	beqz	t0, 1f
	sll	t1, t1, 16

	lbu	t2, (src)
	nop

#ifdef __MIPSEB__
	sll	t2, t2, 8
#endif
	or	t1, t2

1:	ADDC(sum, t1)

	/* fold checksum */
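	/*
	 * On 64-bit, the upper and lower 32-bit halves of the sum are
	 * added with end-around carry, reducing it to a 32-bit value.
	 */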
#ifdef USE_DOUBLE
	dsll32	v1, sum, 0
	daddu	sum, v1
	sltu	v1, sum, v1
	dsra32	sum, sum, 0
	addu	sum, v1
#endif

	/* odd buffer alignment? */
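	/*
	 * A ones'-complement sum commutes with byte swapping: if the
	 * buffer started on an odd address, each byte was accumulated
	 * in the opposite byte lane, so swap the result's two bytes.
	 */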
	beqz	t7, 1f
	nop
	sll	v1, sum, 8
	srl	sum, sum, 8
	or	sum, v1
	andi	sum, 0xffff
1:
	.set	reorder
	/* Add the passed partial csum. */
	ADDC32(sum, a2)
	jr	ra
	.set	noreorder
	END(csum_partial)


/*
 * checksum and copy routines based on memcpy.S
 *
 *	csum_partial_copy_nocheck(src, dst, len, sum)
 *	__csum_partial_copy_user(src, dst, len, sum, errp)
 *
 * See "Spec" in memcpy.S for details.  Unlike __copy_user, all
 * functions in this file use the standard calling convention.
 */
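/*
 * For orientation, a sketch of the C-level views of these entry points
 * (the types here are an assumption based on the usual checksum.h
 * conventions, not taken from this file):
 *
 *	__wsum csum_partial_copy_nocheck(const void *src, void *dst,
 *					 int len, __wsum sum);
 *	__wsum __csum_partial_copy_user(const void *src, void *dst,
 *					int len, __wsum sum, int *errp);
 *
 * On a fault, -EFAULT is stored through errp; see the exception
 * handlers at the end of this file.
 */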

#define src a0
#define dst a1
#define len a2
#define psum a3
#define sum v0
#define odd t8
#define errptr t9

/*
 * The exception handler for loads requires that:
 *  1- AT contain the address of the byte just past the end of the source
 *     of the copy,
 *  2- src_entry <= src < AT, and
 *  3- (dst - src) == (dst_entry - src_entry),
 * The _entry suffix denotes values when the routine was called.
 *
 * (1) is set up by __csum_partial_copy_user and maintained by
 *	not writing AT in the rest of the function
 * (2) is met by incrementing src by the number of bytes copied
 * (3) is met by not doing loads between a pair of increments of dst and src
 *
 * The exception handlers for stores store -EFAULT to errptr and return.
 * These handlers do not need to overwrite any data.
 */

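/*
 * EXC emits the access at local label "9:" and records a fixup entry
 * for it in the __ex_table section, so a fault at that instruction is
 * redirected to the named handler instead of raising a kernel oops.
 */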
#define EXC(inst_reg,addr,handler)		\
9:	inst_reg, addr;				\
	.section __ex_table,"a";		\
	PTR	9b, handler;			\
	.previous

#ifdef USE_DOUBLE

#define LOAD	ld
#define LOADL	ldl
#define LOADR	ldr
#define STOREL	sdl
#define STORER	sdr
#define STORE	sd
#define ADD	daddu
#define SUB	dsubu
#define SRL	dsrl
#define SLL	dsll
#define SLLV	dsllv
#define SRLV	dsrlv
#define NBYTES	8
#define LOG_NBYTES	3

#else

#define LOAD	lw
#define LOADL	lwl
#define LOADR	lwr
#define STOREL	swl
#define STORER	swr
#define STORE	sw
#define ADD	addu
#define SUB	subu
#define SRL	srl
#define SLL	sll
#define SLLV	sllv
#define SRLV	srlv
#define NBYTES	4
#define LOG_NBYTES	2

#endif /* USE_DOUBLE */

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST	LOADR
#define LDREST	LOADL
#define STFIRST	STORER
#define STREST	STOREL
#define SHIFT_DISCARD	SLLV
#define SHIFT_DISCARD_REVERT	SRLV
#else
#define LDFIRST	LOADL
#define LDREST	LOADR
#define STFIRST	STOREL
#define STREST	STORER
#define SHIFT_DISCARD	SRLV
#define SHIFT_DISCARD_REVERT	SLLV
#endif
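/*
 * LDFIRST/LDREST (and STFIRST/STREST) pair the lwl/lwr-style partial
 * accesses that assemble one unaligned unit; which half comes "first"
 * depends on endianness.  SHIFT_DISCARD shifts out the bytes of a
 * partial unit that were not copied, before it is added to the sum.
 */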

#define FIRST(unit)	((unit)*NBYTES)
#define REST(unit)	(FIRST(unit)+NBYTES-1)

#define ADDRMASK	(NBYTES-1)

#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	.set	noat
#else
	.set	at=v1
#endif

LEAF(__csum_partial_copy_user)
	PTR_ADDU	AT, src, len	/* See (1) above. */
#ifdef CONFIG_64BIT
	move	errptr, a4
#else
	lw	errptr, 16(sp)
#endif
FEXPORT(csum_partial_copy_nocheck)
	move	sum, zero
	move	odd, zero
	/*
	 * Note: dst & src may be unaligned, len may be 0
	 * Temps
	 */
	/*
	 * The "issue break"s below are very approximate.
	 * Issue delays for dcache fills will perturb the schedule, as will
	 * load queue full replay traps, etc.
	 *
	 * If len < NBYTES use byte operations.
	 */
	sltu	t2, len, NBYTES
	and	t1, dst, ADDRMASK
	bnez	t2, .Lcopy_bytes_checklen
	and	t0, src, ADDRMASK
	andi	odd, dst, 0x1		/* odd buffer? */
	bnez	t1, .Ldst_unaligned
	nop
	bnez	t0, .Lsrc_unaligned_dst_aligned
	/*
	 * use delay slot for fall-through
	 * src and dst are aligned; need to compute rem
	 */
.Lboth_aligned:
	SRL	t0, len, LOG_NBYTES+3	# +3 for 8 units/iter
	beqz	t0, .Lcleanup_both_aligned # len < 8*NBYTES
	nop
	SUB	len, 8*NBYTES		# subtract here for bgez loop
	.align	4
1:
EXC(	LOAD	t0, UNIT(0)(src), .Ll_exc)
EXC(	LOAD	t1, UNIT(1)(src), .Ll_exc_copy)
EXC(	LOAD	t2, UNIT(2)(src), .Ll_exc_copy)
EXC(	LOAD	t3, UNIT(3)(src), .Ll_exc_copy)
EXC(	LOAD	t4, UNIT(4)(src), .Ll_exc_copy)
EXC(	LOAD	t5, UNIT(5)(src), .Ll_exc_copy)
EXC(	LOAD	t6, UNIT(6)(src), .Ll_exc_copy)
EXC(	LOAD	t7, UNIT(7)(src), .Ll_exc_copy)
	SUB	len, len, 8*NBYTES
	ADD	src, src, 8*NBYTES
EXC(	STORE	t0, UNIT(0)(dst), .Ls_exc)
	ADDC(sum, t0)
EXC(	STORE	t1, UNIT(1)(dst), .Ls_exc)
	ADDC(sum, t1)
EXC(	STORE	t2, UNIT(2)(dst), .Ls_exc)
	ADDC(sum, t2)
EXC(	STORE	t3, UNIT(3)(dst), .Ls_exc)
	ADDC(sum, t3)
EXC(	STORE	t4, UNIT(4)(dst), .Ls_exc)
	ADDC(sum, t4)
EXC(	STORE	t5, UNIT(5)(dst), .Ls_exc)
	ADDC(sum, t5)
EXC(	STORE	t6, UNIT(6)(dst), .Ls_exc)
	ADDC(sum, t6)
EXC(	STORE	t7, UNIT(7)(dst), .Ls_exc)
	ADDC(sum, t7)
	.set	reorder			/* DADDI_WAR */
	ADD	dst, dst, 8*NBYTES
	bgez	len, 1b
	.set	noreorder
	ADD	len, 8*NBYTES		# revert len (see above)

	/*
	 * len == the number of bytes left to copy < 8*NBYTES
	 */
.Lcleanup_both_aligned:
#define rem t7
	beqz	len, .Ldone
	sltu	t0, len, 4*NBYTES
	bnez	t0, .Lless_than_4units
	and	rem, len, (NBYTES-1)	# rem = len % NBYTES
	/*
	 * len >= 4*NBYTES
	 */
EXC(	LOAD	t0, UNIT(0)(src), .Ll_exc)
EXC(	LOAD	t1, UNIT(1)(src), .Ll_exc_copy)
EXC(	LOAD	t2, UNIT(2)(src), .Ll_exc_copy)
EXC(	LOAD	t3, UNIT(3)(src), .Ll_exc_copy)
	SUB	len, len, 4*NBYTES
	ADD	src, src, 4*NBYTES
EXC(	STORE	t0, UNIT(0)(dst), .Ls_exc)
	ADDC(sum, t0)
EXC(	STORE	t1, UNIT(1)(dst), .Ls_exc)
	ADDC(sum, t1)
EXC(	STORE	t2, UNIT(2)(dst), .Ls_exc)
	ADDC(sum, t2)
EXC(	STORE	t3, UNIT(3)(dst), .Ls_exc)
	ADDC(sum, t3)
	.set	reorder			/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
	beqz	len, .Ldone
	.set	noreorder
.Lless_than_4units:
	/*
	 * rem = len % NBYTES
	 */
	beq	rem, len, .Lcopy_bytes
	nop
1:
EXC(	LOAD	t0, 0(src), .Ll_exc)
	ADD	src, src, NBYTES
	SUB	len, len, NBYTES
EXC(	STORE	t0, 0(dst), .Ls_exc)
	ADDC(sum, t0)
	.set	reorder			/* DADDI_WAR */
	ADD	dst, dst, NBYTES
	bne	rem, len, 1b
	.set	noreorder

	/*
	 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
	 * A loop would do only a byte at a time with possible branch
	 * mispredicts.  Can't do an explicit LOAD dst,mask,or,STORE
	 * because can't assume read-access to dst.  Instead, use
	 * STREST dst, which doesn't require read access to dst.
	 *
	 * This code should perform better than a simple loop on modern,
	 * wide-issue mips processors because the code has fewer branches and
	 * more instruction-level parallelism.
	 */
#define bits t2
	beqz	len, .Ldone
	ADD	t1, dst, len	# t1 is just past last byte of dst
	li	bits, 8*NBYTES
	SLL	rem, len, 3	# rem = number of bits to keep
EXC(	LOAD	t0, 0(src), .Ll_exc)
	SUB	bits, bits, rem	# bits = number of bits to discard
	SHIFT_DISCARD t0, t0, bits
EXC(	STREST	t0, -1(t1), .Ls_exc)
	SHIFT_DISCARD_REVERT t0, t0, bits
	.set	reorder
	ADDC(sum, t0)
	b	.Ldone
	.set	noreorder
.Ldst_unaligned:
	/*
	 * dst is unaligned
	 * t0 = src & ADDRMASK
	 * t1 = dst & ADDRMASK; T1 > 0
	 * len >= NBYTES
	 *
	 * Copy enough bytes to align dst
	 * Set match = (src and dst have same alignment)
	 */
#define match rem
EXC(	LDFIRST	t3, FIRST(0)(src), .Ll_exc)
	ADD	t2, zero, NBYTES
EXC(	LDREST	t3, REST(0)(src), .Ll_exc_copy)
	SUB	t2, t2, t1	# t2 = number of bytes copied
	xor	match, t0, t1
EXC(	STFIRST t3, FIRST(0)(dst), .Ls_exc)
	SLL	t4, t1, 3	# t4 = number of bits to discard
	SHIFT_DISCARD t3, t3, t4
	/* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
	ADDC(sum, t3)
	beq	len, t2, .Ldone
	SUB	len, len, t2
	ADD	dst, dst, t2
	beqz	match, .Lboth_aligned
	ADD	src, src, t2

.Lsrc_unaligned_dst_aligned:
	SRL	t0, len, LOG_NBYTES+2	# +2 for 4 units/iter
	beqz	t0, .Lcleanup_src_unaligned
	and	rem, len, (4*NBYTES-1)	# rem = len % 4*NBYTES
1:
/*
 * Avoid consecutive LD*'s to the same register since some mips
 * implementations can't issue them in the same cycle.
 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
 * are to the same unit (unless src is aligned, but it's not).
 */
EXC(	LDFIRST	t0, FIRST(0)(src), .Ll_exc)
EXC(	LDFIRST	t1, FIRST(1)(src), .Ll_exc_copy)
	SUB	len, len, 4*NBYTES
EXC(	LDREST	t0, REST(0)(src), .Ll_exc_copy)
EXC(	LDREST	t1, REST(1)(src), .Ll_exc_copy)
EXC(	LDFIRST	t2, FIRST(2)(src), .Ll_exc_copy)
EXC(	LDFIRST	t3, FIRST(3)(src), .Ll_exc_copy)
EXC(	LDREST	t2, REST(2)(src), .Ll_exc_copy)
EXC(	LDREST	t3, REST(3)(src), .Ll_exc_copy)
	ADD	src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
	nop				# improves slotting
#endif
EXC(	STORE	t0, UNIT(0)(dst), .Ls_exc)
	ADDC(sum, t0)
EXC(	STORE	t1, UNIT(1)(dst), .Ls_exc)
	ADDC(sum, t1)
EXC(	STORE	t2, UNIT(2)(dst), .Ls_exc)
	ADDC(sum, t2)
EXC(	STORE	t3, UNIT(3)(dst), .Ls_exc)
	ADDC(sum, t3)
	.set	reorder			/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
	bne	len, rem, 1b
	.set	noreorder

.Lcleanup_src_unaligned:
	beqz	len, .Ldone
	and	rem, len, NBYTES-1	# rem = len % NBYTES
	beq	rem, len, .Lcopy_bytes
	nop
1:
EXC(	LDFIRST	t0, FIRST(0)(src), .Ll_exc)
EXC(	LDREST	t0, REST(0)(src), .Ll_exc_copy)
	ADD	src, src, NBYTES
	SUB	len, len, NBYTES
EXC(	STORE	t0, 0(dst), .Ls_exc)
	ADDC(sum, t0)
	.set	reorder			/* DADDI_WAR */
	ADD	dst, dst, NBYTES
	bne	len, rem, 1b
	.set	noreorder

.Lcopy_bytes_checklen:
	beqz	len, .Ldone
	nop
.Lcopy_bytes:
	/* 0 < len < NBYTES */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define SHIFT_START 0
#define SHIFT_INC 8
#else
#define SHIFT_START 8*(NBYTES-1)
#define SHIFT_INC -8
#endif
	move	t2, zero	# partial word
	li	t3, SHIFT_START	# shift
/* use .Ll_exc_copy here to return correct sum on fault */
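/*
 * COPY_BYTE copies one byte and merges it into the partial word t2 at
 * the byte lane selected by the running shift t3, so the whole tail
 * can be folded into the sum with a single ADDC once assembled.
 */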
#define COPY_BYTE(N)			\
EXC(	lbu	t0, N(src), .Ll_exc_copy);	\
	SUB	len, len, 1;		\
EXC(	sb	t0, N(dst), .Ls_exc);	\
	SLLV	t0, t0, t3;		\
	addu	t3, SHIFT_INC;		\
	beqz	len, .Lcopy_bytes_done;	\
	or	t2, t0

	COPY_BYTE(0)
	COPY_BYTE(1)
#ifdef USE_DOUBLE
	COPY_BYTE(2)
	COPY_BYTE(3)
	COPY_BYTE(4)
	COPY_BYTE(5)
#endif
EXC(	lbu	t0, NBYTES-2(src), .Ll_exc_copy)
	SUB	len, len, 1
EXC(	sb	t0, NBYTES-2(dst), .Ls_exc)
	SLLV	t0, t0, t3
	or	t2, t0
.Lcopy_bytes_done:
	ADDC(sum, t2)
.Ldone:
	/* fold checksum */
#ifdef USE_DOUBLE
	dsll32	v1, sum, 0
	daddu	sum, v1
	sltu	v1, sum, v1
	dsra32	sum, sum, 0
	addu	sum, v1
#endif

	/* odd buffer alignment? */
	beqz	odd, 1f
	nop
	sll	v1, sum, 8
	srl	sum, sum, 8
	or	sum, v1
	andi	sum, 0xffff
1:
	.set	reorder
	ADDC32(sum, psum)
	jr	ra
	.set	noreorder

.Ll_exc_copy:
	/*
	 * Copy bytes from src until faulting load address (or until a
	 * lb faults)
	 *
	 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
	 * may be more than a byte beyond the last address.
	 * Hence, the lb below may get an exception.
	 *
	 * Assumes src < THREAD_BUADDR($28)
	 */
	LOAD	t0, TI_TASK($28)
	li	t2, SHIFT_START
	LOAD	t0, THREAD_BUADDR(t0)
1:
EXC(	lbu	t1, 0(src), .Ll_exc)
	ADD	src, src, 1
	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
	SLLV	t1, t1, t2
	addu	t2, SHIFT_INC
	ADDC(sum, t1)
	.set	reorder			/* DADDI_WAR */
	ADD	dst, dst, 1
	bne	src, t0, 1b
	.set	noreorder
.Ll_exc:
	LOAD	t0, TI_TASK($28)
	nop
	LOAD	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
	nop
	SUB	len, AT, t0		# len = number of uncopied bytes
	/*
	 * Here's where we rely on src and dst being incremented in tandem,
	 * See (3) above.
	 * dst += (fault addr - src) to put dst at first byte to clear
	 */
	ADD	dst, t0			# compute start address in a1
	SUB	dst, src
	/*
	 * Clear len bytes starting at dst.  Can't call __bzero because it
	 * might modify len.  An inefficient loop for these rare times...
	 */
	.set	reorder			/* DADDI_WAR */
	SUB	src, len, 1
	beqz	len, .Ldone
	.set	noreorder
1:	sb	zero, 0(dst)
	ADD	dst, dst, 1
	.set	push
	.set	noat
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	bnez	src, 1b
	SUB	src, src, 1
#else
	li	v1, 1
	bnez	src, 1b
	SUB	src, src, v1
#endif
	li	v1, -EFAULT
	b	.Ldone
	sw	v1, (errptr)

.Ls_exc:
	li	v0, -1	/* invalid checksum */
	li	v1, -EFAULT
	jr	ra
	sw	v1, (errptr)
	.set	pop
	END(__csum_partial_copy_user)