#ifndef __ASM_GENERIC_CHECKSUM_H
#define __ASM_GENERIC_CHECKSUM_H

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
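
/*
 * Usage sketch (illustrative, not part of this header): a message
 * split across two fragments can be summed incrementally by feeding
 * the running sum back in; frag1/frag2 and their lengths are
 * hypothetical locals:
 *
 *	__wsum sum = csum_partial(frag1, len1, 0);
 *	sum = csum_partial(frag2, len2, sum);
 *	__sum16 check = csum_fold(sum);
 */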

/*
 * the same as csum_partial, but copies from src while it
 * checksums
 *
 * here it is even more important to align src and dst on a 32-bit
 * (or, even better, a 64-bit) boundary
 */
extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum);

/*
 * the same as csum_partial_copy, but copies from user space.
 *
 * here it is even more important to align src and dst on a 32-bit
 * (or, even better, a 64-bit) boundary
 */
extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
					int len, __wsum sum, int *csum_err);

#ifndef csum_partial_copy_nocheck
#define csum_partial_copy_nocheck(src, dst, len, sum)	\
	csum_partial_copy((src), (dst), (len), (sum))
#endif

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * whose length (ihl, counted in 32-bit words) is always a multiple
 * of 4 octets.
 */
extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
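
/*
 * Usage sketch (illustrative): verifying a received IPv4 header.
 * The header's own checksum field is included in the sum, so a
 * valid header folds to zero; iph is assumed to point at a
 * struct iphdr:
 *
 *	if (ip_fast_csum(iph, iph->ihl))
 *		goto csum_error;
 */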

/*
 * Fold a partial checksum
 */
static inline __sum16 csum_fold(__wsum csum)
{
	u32 sum = (__force u32)csum;
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (__force __sum16)~sum;
}
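
/*
 * Worked example (illustrative): folding csum = 0x12345678 computes
 * 0x5678 + 0x1234 = 0x68ac. The second pass is a no-op here, but is
 * needed in general because the first addition can itself carry into
 * bit 16. The result is ~0x68ac = 0x9753 in 16 bits.
 */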

#ifndef csum_tcpudp_nofold
/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 32-bit unfolded sum, suitable for feeding into csum_fold
 */
extern __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
		unsigned short proto, __wsum sum);
#endif

#ifndef csum_tcpudp_magic
/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
		unsigned short proto, __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
#endif
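
/*
 * Usage sketch (illustrative): verifying a received TCP segment.
 * The TCP header and payload (len bytes) are summed first; a valid
 * segment yields zero because the transmitted checksum field is
 * part of the sum. tcph, saddr, daddr and len are hypothetical
 * locals:
 *
 *	__wsum sum = csum_partial(tcph, len, 0);
 *	if (csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, sum))
 *		goto csum_error;
 */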

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
extern __sum16 ip_compute_csum(const void *buff, int len);
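
/*
 * Note (assumption about the generic lib/checksum.c implementation):
 * this is effectively csum_fold(csum_partial(buff, len, 0)).
 */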

#endif /* __ASM_GENERIC_CHECKSUM_H */