/*
 * This file contains assembly-language implementations
 * of IP-style 1's complement checksum routines.
 *
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 * Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au).
 */

#include <linux/sys.h>
#include <asm/processor.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>

	.text

/*
 * ip_fast_csum(buf, len) -- Optimized for IP header
 * len is in words and is always >= 5.
 */
_GLOBAL(ip_fast_csum)
	lwz	r0,0(r3)
	lwzu	r5,4(r3)
	addic.	r4,r4,-2
	addc	r0,r0,r5
	mtctr	r4
	blelr-
1:	lwzu	r4,4(r3)
	adde	r0,r0,r4
	bdnz	1b
	addze	r0,r0		/* add in final carry */
	rlwinm	r3,r0,16,0,31	/* fold two halves together */
	add	r3,r0,r3
	not	r3,r3
	srwi	r3,r3,16
	blr

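/*
 * Illustrative only (not part of the original source): a rough C
 * equivalent of ip_fast_csum, assuming a wide accumulator in place of
 * the addc/adde carry chain used above.  The rlwinm/add/not/srwi tail
 * performs the same 16-bit fold, leaving the result in the upper
 * halfword before complementing.
 *
 *	unsigned short ip_fast_csum_sketch(const unsigned int *buf,
 *					   unsigned int len)
 *	{
 *		unsigned long long sum = 0;
 *
 *		while (len--)
 *			sum += *buf++;			// 32-bit words
 *		sum = (sum & 0xffffffff) + (sum >> 32);	// fold 64 -> 32
 *		sum = (sum & 0xffff) + (sum >> 16);	// fold 32 -> 16
 *		sum += sum >> 16;			// final carry
 *		return ~sum;				// 1's complement
 *	}
 */
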
/*
 * Compute checksum of TCP or UDP pseudo-header:
 *   csum_tcpudp_magic(saddr, daddr, len, proto, sum)
 */
_GLOBAL(csum_tcpudp_magic)
	rlwimi	r5,r6,16,0,15	/* put proto in upper half of len */
	addc	r0,r3,r4	/* add 4 32-bit words together */
	adde	r0,r0,r5
	adde	r0,r0,r7
	addze	r0,r0		/* add in final carry */
	rlwinm	r3,r0,16,0,31	/* fold two halves together */
	add	r3,r0,r3
	not	r3,r3
	srwi	r3,r3,16
	blr

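/*
 * Illustrative only (not part of the original source): in C terms the
 * sum formed above is
 *
 *	unsigned long long s = (unsigned long long)saddr + daddr
 *			     + ((proto << 16) | len) + sum;
 *
 * followed by the same fold-and-complement tail as ip_fast_csum.  The
 * rlwimi packs proto into the upper halfword of len, giving the
 * ((proto << 16) | len) term.
 */
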
/*
 * Computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * csum_partial(buff, len, sum)
 */
_GLOBAL(csum_partial)
	addic	r0,r5,0
	subi	r3,r3,4
	srwi.	r6,r4,2
	beq	3f		/* if we're doing < 4 bytes */
	andi.	r5,r3,2		/* Align buffer to longword boundary */
	beq+	1f
	lhz	r5,4(r3)	/* do 2 bytes to get aligned */
	addi	r3,r3,2
	subi	r4,r4,2
	addc	r0,r0,r5
	srwi.	r6,r4,2		/* # words to do */
	beq	3f
1:	mtctr	r6
2:	lwzu	r5,4(r3)	/* the bdnz has zero overhead, so it should */
	adde	r0,r0,r5	/* be unnecessary to unroll this loop */
	bdnz	2b
	andi.	r4,r4,3
3:	cmpwi	0,r4,2
	blt+	4f
	lhz	r5,4(r3)
	addi	r3,r3,2
	subi	r4,r4,2
	adde	r0,r0,r5
4:	cmpwi	0,r4,1
	bne+	5f
	lbz	r5,4(r3)
	slwi	r5,r5,8		/* Upper byte of word */
	adde	r0,r0,r5
5:	addze	r3,r0		/* add in final carry */
	blr

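/*
 * Illustrative only (not part of the original source): a rough C
 * equivalent of csum_partial, assuming big-endian halfword loads and a
 * wide accumulator in place of the CA flag.  Like the assembly, it
 * returns the running 32-bit sum, not the folded 16-bit checksum.
 *
 *	unsigned int csum_partial_sketch(const unsigned char *buff,
 *					 int len, unsigned int sum)
 *	{
 *		unsigned long long s = sum;
 *
 *		if (len >= 4 && ((unsigned long)buff & 2)) {
 *			s += *(const unsigned short *)buff; // align to 4
 *			buff += 2;
 *			len -= 2;
 *		}
 *		for (; len >= 4; buff += 4, len -= 4)
 *			s += *(const unsigned int *)buff;
 *		if (len >= 2) {
 *			s += *(const unsigned short *)buff;
 *			buff += 2;
 *			len -= 2;
 *		}
 *		if (len)			// odd trailing byte is the
 *			s += *buff << 8;	// upper byte of a halfword
 *		while (s >> 32)			// fold carries back in
 *			s = (s & 0xffffffff) + (s >> 32);
 *		return s;
 *	}
 */
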
/*
 * Computes the checksum of a memory block at src, length len,
 * and adds in "sum" (32-bit), while copying the block to dst.
 * If an access exception occurs on src or dst, it stores -EFAULT
 * to *src_err or *dst_err respectively, and (for an error on
 * src) zeroes the rest of dst.
 *
 * csum_partial_copy_generic(src, dst, len, sum, src_err, dst_err)
 */
_GLOBAL(csum_partial_copy_generic)
	addic	r0,r6,0
	subi	r3,r3,4
	subi	r4,r4,4
	srwi.	r6,r5,2
	beq	3f		/* if we're doing < 4 bytes */
	andi.	r9,r4,2		/* Align dst to longword boundary */
	beq+	1f
81:	lhz	r6,4(r3)	/* do 2 bytes to get aligned */
	addi	r3,r3,2
	subi	r5,r5,2
91:	sth	r6,4(r4)
	addi	r4,r4,2
	addc	r0,r0,r6
	srwi.	r6,r5,2		/* # words to do */
	beq	3f
1:	srwi.	r6,r5,4		/* # groups of 4 words to do */
	beq	10f
	mtctr	r6
	/* main loop: load, checksum and store 4 words per iteration */
71:	lwz	r6,4(r3)
72:	lwz	r9,8(r3)
73:	lwz	r10,12(r3)
74:	lwzu	r11,16(r3)
	adde	r0,r0,r6
75:	stw	r6,4(r4)
	adde	r0,r0,r9
76:	stw	r9,8(r4)
	adde	r0,r0,r10
77:	stw	r10,12(r4)
	adde	r0,r0,r11
78:	stwu	r11,16(r4)
	bdnz	71b
10:	rlwinm.	r6,r5,30,30,31	/* # words left to do */
	beq	13f
	mtctr	r6
82:	lwzu	r9,4(r3)
92:	stwu	r9,4(r4)
	adde	r0,r0,r9
	bdnz	82b
13:	andi.	r5,r5,3
3:	cmpwi	0,r5,2
	blt+	4f
83:	lhz	r6,4(r3)
	addi	r3,r3,2
	subi	r5,r5,2
93:	sth	r6,4(r4)
	addi	r4,r4,2
	adde	r0,r0,r6
4:	cmpwi	0,r5,1
	bne+	5f
84:	lbz	r6,4(r3)
94:	stb	r6,4(r4)
	slwi	r6,r6,8		/* Upper byte of word */
	adde	r0,r0,r6
5:	addze	r3,r0		/* add in final carry */
	blr

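/*
 * Illustrative only (not part of the original source): ignoring faults,
 * the routine above is csum_partial plus a copy.  A byte-wise C sketch
 * of that contract (the real code moves whole words and relies on the
 * __ex_table fixups below for the -EFAULT handling):
 *
 *	unsigned int copy_and_csum_sketch(const unsigned char *src,
 *					  unsigned char *dst, int len,
 *					  unsigned int sum)
 *	{
 *		unsigned long long s = sum;
 *		int i;
 *
 *		for (i = 0; i + 1 < len; i += 2) {
 *			dst[i] = src[i];
 *			dst[i + 1] = src[i + 1];
 *			s += (src[i] << 8) | src[i + 1]; // big-endian
 *		}
 *		if (len & 1) {
 *			dst[len - 1] = src[len - 1];
 *			s += src[len - 1] << 8;
 *		}
 *		while (s >> 32)
 *			s = (s & 0xffffffff) + (s >> 32);
 *		return s;
 *	}
 */
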
/* These shouldn't go in the fixup section, since that would
   cause the ex_table addresses to get out of order. */

/* fault in the unrolled loop: recover the byte count from the groups
   of 16 still in ctr, then fall through to the zeroing code */
src_error_4:
	mfctr	r6		/* update # bytes remaining from ctr */
	rlwimi	r5,r6,4,0,27	/* 16 bytes per group left to do */
	b	79f
/* fault on the initial halfword load */
src_error_1:
	li	r6,0
	subi	r5,r5,2
95:	sth	r6,4(r4)
	addi	r4,r4,2
79:	srwi.	r6,r5,2
	beq	3f
	mtctr	r6
/* fault in the word-at-a-time loop: ctr still holds the word count */
src_error_2:
	li	r6,0
96:	stwu	r6,4(r4)
	bdnz	96b
3:	andi.	r5,r5,3
	beq	src_error
/* fault on the trailing halfword/byte loads */
src_error_3:
	li	r6,0
	mtctr	r5
	addi	r4,r4,3
97:	stbu	r6,1(r4)
	bdnz	97b
/* report the fault (if a src_err pointer was supplied) and return
   the partial sum accumulated so far */
src_error:
	cmpwi	0,r7,0
	beq	1f
	li	r6,-EFAULT
	stw	r6,0(r7)
1:	addze	r3,r0
	blr

dst_error:
	cmpwi	0,r8,0
	beq	1f
	li	r6,-EFAULT
	stw	r6,0(r8)
1:	addze	r3,r0
	blr

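/*
 * Each entry pairs the address of an instruction that may fault (the
 * numeric labels above) with the fixup to branch to: faulting loads go
 * to a src_error_* path, which zeroes the rest of dst before reporting,
 * and faulting stores go straight to dst_error.
 */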
.section __ex_table,"a"
	.long	81b,src_error_1
	.long	91b,dst_error
	.long	71b,src_error_4
	.long	72b,src_error_4
	.long	73b,src_error_4
	.long	74b,src_error_4
	.long	75b,dst_error
	.long	76b,dst_error
	.long	77b,dst_error
	.long	78b,dst_error
	.long	82b,src_error_2
	.long	92b,dst_error
	.long	83b,src_error_3
	.long	93b,dst_error
	.long	84b,src_error_3
	.long	94b,dst_error
	.long	95b,dst_error
	.long	96b,dst_error
	.long	97b,dst_error