/*
 *
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IP/TCP/UDP checksumming routines
 *
 * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Tom May, <ftom@netcom.com>
 *		Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
 *		Lots of code moved from tcp.c and ip.c; see those files
 *		for more names.
 *
 * 03/02/96	Jes Sorensen, Andreas Schwab, Roman Hodek:
 *		Fixed some nasty bugs, causing some horrible crashes.
 *		A: At some points, the sum (%0) was used as
 *		length-counter instead of the length counter
 *		(%1). Thanks to Roman Hodek for pointing this out.
 *		B: GCC seems to mess up if one uses too many
 *		data-registers to hold input values and one tries to
 *		specify d0 and d1 as scratch registers. Letting gcc
 *		choose these registers itself solves the problem.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access
   kills, so most of the assembly has to go. */
|  | 35 | #include <linux/module.h> | 
|  | 36 | #include <net/checksum.h> | 
|  | 37 |  | 
|  | 38 | #include <asm/byteorder.h> | 
|  | 39 |  | 
| Arnd Bergmann | 20c1f64 | 2009-06-23 21:37:26 +0200 | [diff] [blame] | 40 | #ifndef do_csum | 
/* Fold a 32-bit partial sum down to 16 bits, wrapping carries back in. */
static inline unsigned short from32to16(unsigned int x)
{
	unsigned int folded = (x >> 16) + (x & 0xffff);

	/* the first fold can itself carry into bit 16, so fold once more */
	folded = (folded >> 16) + (folded & 0xffff);
	return folded;
}
|  | 49 |  | 
/*
 * Sum a buffer one byte, half-word and word at a time, so no memory
 * access is ever misaligned.  Returns the folded 16-bit one's-complement
 * sum (not inverted), byte-swapped back if the buffer started on an odd
 * address.
 */
static unsigned int do_csum(const unsigned char *buff, int len)
{
	const unsigned char *p = buff;
	unsigned int sum = 0;
	int start_odd;

	if (len <= 0)
		return 0;

	/* Step to an even address; remember it so we can swap back below. */
	start_odd = (unsigned long) p & 1;
	if (start_odd) {
#ifdef __LITTLE_ENDIAN
		sum += (*p << 8);
#else
		sum = *p;
#endif
		p++;
		len--;
	}

	if (len >= 2) {
		/* One 16-bit load brings us to a 4-byte boundary. */
		if ((unsigned long) p & 2) {
			sum += *(unsigned short *) p;
			p += 2;
			len -= 2;
		}

		/* Main loop: 32-bit words with manual end-around carry. */
		if (len >= 4) {
			const unsigned char *end = p + (len & ~3);
			unsigned int carry = 0;

			while (p < end) {
				unsigned int w = *(unsigned int *) p;

				p += 4;
				sum += carry;
				sum += w;
				carry = (w > sum);
			}
			sum += carry;
			sum = (sum & 0xffff) + (sum >> 16);
		}

		/* Trailing 16-bit half-word, if any. */
		if (len & 2) {
			sum += *(unsigned short *) p;
			p += 2;
		}
	}

	/* Trailing odd byte. */
	if (len & 1) {
#ifdef __LITTLE_ENDIAN
		sum += *p;
#else
		sum += (*p << 8);
#endif
	}

	/* Fold to 16 bits (two folds absorb any carry from the first). */
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	if (start_odd)
		sum = ((sum >> 8) & 0xff) | ((sum & 0xff) << 8);
	return sum;
}
| Arnd Bergmann | 20c1f64 | 2009-06-23 21:37:26 +0200 | [diff] [blame] | 106 | #endif | 
| Arnd Bergmann | 26a28fa | 2009-05-13 22:56:38 +0000 | [diff] [blame] | 107 |  | 
|  | 108 | /* | 
|  | 109 | *	This is a version of ip_compute_csum() optimized for IP headers, | 
|  | 110 | *	which always checksum on 4 octet boundaries. | 
|  | 111 | */ | 
|  | 112 | __sum16 ip_fast_csum(const void *iph, unsigned int ihl) | 
|  | 113 | { | 
|  | 114 | return (__force __sum16)~do_csum(iph, ihl*4); | 
|  | 115 | } | 
|  | 116 | EXPORT_SYMBOL(ip_fast_csum); | 
|  | 117 |  | 
|  | 118 | /* | 
|  | 119 | * computes the checksum of a memory block at buff, length len, | 
|  | 120 | * and adds in "sum" (32-bit) | 
|  | 121 | * | 
|  | 122 | * returns a 32-bit number suitable for feeding into itself | 
|  | 123 | * or csum_tcpudp_magic | 
|  | 124 | * | 
|  | 125 | * this function must be called with even lengths, except | 
|  | 126 | * for the last fragment, which may be odd | 
|  | 127 | * | 
|  | 128 | * it's best to have buff aligned on a 32-bit boundary | 
|  | 129 | */ | 
|  | 130 | __wsum csum_partial(const void *buff, int len, __wsum wsum) | 
|  | 131 | { | 
|  | 132 | unsigned int sum = (__force unsigned int)wsum; | 
|  | 133 | unsigned int result = do_csum(buff, len); | 
|  | 134 |  | 
|  | 135 | /* add in old sum, and carry.. */ | 
|  | 136 | result += sum; | 
|  | 137 | if (sum > result) | 
|  | 138 | result += 1; | 
|  | 139 | return (__force __wsum)result; | 
|  | 140 | } | 
|  | 141 | EXPORT_SYMBOL(csum_partial); | 
|  | 142 |  | 
|  | 143 | /* | 
|  | 144 | * this routine is used for miscellaneous IP-like checksums, mainly | 
|  | 145 | * in icmp.c | 
|  | 146 | */ | 
|  | 147 | __sum16 ip_compute_csum(const void *buff, int len) | 
|  | 148 | { | 
|  | 149 | return (__force __sum16)~do_csum(buff, len); | 
|  | 150 | } | 
|  | 151 | EXPORT_SYMBOL(ip_compute_csum); | 
|  | 152 |  | 
|  | 153 | /* | 
|  | 154 | * copy from fs while checksumming, otherwise like csum_partial | 
|  | 155 | */ | 
|  | 156 | __wsum | 
|  | 157 | csum_partial_copy_from_user(const void __user *src, void *dst, int len, | 
|  | 158 | __wsum sum, int *csum_err) | 
|  | 159 | { | 
|  | 160 | int missing; | 
|  | 161 |  | 
|  | 162 | missing = __copy_from_user(dst, src, len); | 
|  | 163 | if (missing) { | 
|  | 164 | memset(dst + len - missing, 0, missing); | 
|  | 165 | *csum_err = -EFAULT; | 
|  | 166 | } else | 
|  | 167 | *csum_err = 0; | 
|  | 168 |  | 
|  | 169 | return csum_partial(dst, len, sum); | 
|  | 170 | } | 
|  | 171 | EXPORT_SYMBOL(csum_partial_copy_from_user); | 
|  | 172 |  | 
|  | 173 | /* | 
|  | 174 | * copy from ds while checksumming, otherwise like csum_partial | 
|  | 175 | */ | 
|  | 176 | __wsum | 
|  | 177 | csum_partial_copy(const void *src, void *dst, int len, __wsum sum) | 
|  | 178 | { | 
|  | 179 | memcpy(dst, src, len); | 
|  | 180 | return csum_partial(dst, len, sum); | 
|  | 181 | } | 
|  | 182 | EXPORT_SYMBOL(csum_partial_copy); | 
|  | 183 |  | 
|  | 184 | #ifndef csum_tcpudp_nofold | 
|  | 185 | __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, | 
|  | 186 | unsigned short len, | 
|  | 187 | unsigned short proto, | 
|  | 188 | __wsum sum) | 
|  | 189 | { | 
|  | 190 | unsigned long long s = (__force u32)sum; | 
|  | 191 |  | 
|  | 192 | s += (__force u32)saddr; | 
|  | 193 | s += (__force u32)daddr; | 
|  | 194 | #ifdef __BIG_ENDIAN | 
|  | 195 | s += proto + len; | 
|  | 196 | #else | 
|  | 197 | s += (proto + len) << 8; | 
|  | 198 | #endif | 
|  | 199 | s += (s >> 32); | 
|  | 200 | return (__force __wsum)s; | 
|  | 201 | } | 
|  | 202 | EXPORT_SYMBOL(csum_tcpudp_nofold); | 
|  | 203 | #endif |