/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define TCP_DEBUG 1
#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/dmaengine.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U
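
/*
 * Illustrative example (not kernel code): an unscaled window of 40000
 * read as a signed 16-bit value is 40000 - 65536 = -25536, which is
 * why the advertised window is capped at 32767 until window scaling
 * has been negotiated.
 */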

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* Minimal RCV_MSS. */
#define TCP_MIN_RCVMSS		536U

/* The least MTU to use for probing */
#define TCP_BASE_MSS		512

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal reordering. */
#define TCP_MAX_REORDERING	127

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 5	/* number of times to retry active opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
				 * connection: ~180sec is RFC minimum	*/


#define TCP_ORPHAN_RETRIES 7	/* number of times to retry on an orphaned
				 * socket. 7 is ~50sec-16min.
				 */


#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value	*/
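
/*
 * Illustrative arithmetic (assuming HZ == 1000): the values above work
 * out to TCP_DELACK_MIN = TCP_ATO_MIN = 40ms, TCP_DELACK_MAX =
 * TCP_RTO_MIN = 200ms, TCP_RTO_MAX = 120s and TCP_TIMEOUT_INIT = 3s.
 */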

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */

#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */

/*
 *	TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4
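
/*
 * Example (illustrative): the timestamp option is 10 bytes on the wire,
 * so stacks pad it to a 4-byte boundary with two TCPOPT_NOPs, giving
 * TCPOLEN_TSTAMP_ALIGNED == 12; likewise MD5SIG (18) pads to 20.
 */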

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

extern struct inet_timewait_death_row tcp_death_row;

/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern int sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_frto_response;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_abc;
extern int sysctl_tcp_mtu_probing;
extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_max_ssthresh;

extern atomic_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;

/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline int before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)
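
/*
 * Worked example (illustrative): the cast makes the comparison safe
 * across sequence-number wraparound.  With seq1 = 0xfffffff0 and
 * seq2 = 0x10, seq1 - seq2 = 0xffffffe0, which is negative as __s32,
 * so before(seq1, seq2) is true even though seq1 > seq2 as plain u32.
 */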

/* is s2<=s1<=s3 ? */
static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}

static inline int tcp_too_many_orphans(struct sock *sk, int num)
{
	return (num > sysctl_tcp_max_orphans) ||
		(sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
		 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]);
}

/* syncookies: remember time of last synqueue overflow */
static inline void tcp_synq_overflow(struct sock *sk)
{
	tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
	return time_after(jiffies, last_overflow + TCP_TIMEOUT_INIT);
}
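
/*
 * Usage sketch (illustrative only): a listener calls tcp_synq_overflow()
 * on each SYN-queue overflow and later treats a cookie-looking ACK as
 * potentially valid only while tcp_synq_no_recent_overflow() is false,
 * i.e. within TCP_TIMEOUT_INIT of the last recorded overflow.
 */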

extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)

extern void			tcp_v4_err(struct sk_buff *skb, u32);

extern void			tcp_shutdown(struct sock *sk, int how);

extern int			tcp_v4_rcv(struct sk_buff *skb);

extern int			tcp_v4_remember_stamp(struct sock *sk);

extern int			tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);

extern int			tcp_sendmsg(struct kiocb *iocb, struct socket *sock,
					    struct msghdr *msg, size_t size);
extern ssize_t			tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags);

extern int			tcp_ioctl(struct sock *sk,
					  int cmd,
					  unsigned long arg);

extern int			tcp_rcv_state_process(struct sock *sk,
						      struct sk_buff *skb,
						      struct tcphdr *th,
						      unsigned len);

extern int			tcp_rcv_established(struct sock *sk,
						    struct sk_buff *skb,
						    struct tcphdr *th,
						    unsigned len);

extern void			tcp_rcv_space_adjust(struct sock *sk);

extern void			tcp_cleanup_rbuf(struct sock *sk, int copied);

extern int			tcp_twsk_unique(struct sock *sk,
						struct sock *sktw, void *twp);

extern void			tcp_twsk_destructor(struct sock *sk);

extern ssize_t			tcp_splice_read(struct socket *sk, loff_t *ppos,
						struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato   = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

extern void tcp_enter_quickack_mode(struct sock *sk);

static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
{
	rx_opt->tstamp_ok = rx_opt->sack_ok = rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
}

#define	TCP_ECN_OK		1
#define	TCP_ECN_QUEUE_CWR	2
#define	TCP_ECN_DEMAND_CWR	4

static __inline__ void
TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
{
	if (sysctl_tcp_ecn && th->ece && th->cwr)
		inet_rsk(req)->ecn_ok = 1;
}

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};


extern enum tcp_tw_status	tcp_timewait_state_process(struct inet_timewait_sock *tw,
							   struct sk_buff *skb,
							   const struct tcphdr *th);

extern struct sock *		tcp_check_req(struct sock *sk, struct sk_buff *skb,
					      struct request_sock *req,
					      struct request_sock **prev);
extern int			tcp_child_process(struct sock *parent,
						  struct sock *child,
						  struct sk_buff *skb);
extern int			tcp_use_frto(struct sock *sk);
extern void			tcp_enter_frto(struct sock *sk);
extern void			tcp_enter_loss(struct sock *sk, int how);
extern void			tcp_clear_retrans(struct tcp_sock *tp);
extern void			tcp_update_metrics(struct sock *sk);

extern void			tcp_close(struct sock *sk,
					  long timeout);
extern unsigned int		tcp_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);

extern int			tcp_getsockopt(struct sock *sk, int level,
					       int optname,
					       char __user *optval,
					       int __user *optlen);
extern int			tcp_setsockopt(struct sock *sk, int level,
					       int optname, char __user *optval,
					       int optlen);
extern int			compat_tcp_getsockopt(struct sock *sk,
					int level, int optname,
					char __user *optval, int __user *optlen);
extern int			compat_tcp_setsockopt(struct sock *sk,
					int level, int optname,
					char __user *optval, int optlen);
extern void			tcp_set_keepalive(struct sock *sk, int val);
extern int			tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
					    struct msghdr *msg,
					    size_t len, int nonblock,
					    int flags, int *addr_len);

extern void			tcp_parse_options(struct sk_buff *skb,
						  struct tcp_options_received *opt_rx,
						  int estab);

extern u8			*tcp_parse_md5sig_option(struct tcphdr *th);

/*
 *	TCP v4 functions exported for the inet6 API
 */

extern void			tcp_v4_send_check(struct sock *sk, int len,
						  struct sk_buff *skb);

extern int			tcp_v4_conn_request(struct sock *sk,
						    struct sk_buff *skb);

extern struct sock *		tcp_create_openreq_child(struct sock *sk,
							 struct request_sock *req,
							 struct sk_buff *skb);

extern struct sock *		tcp_v4_syn_recv_sock(struct sock *sk,
						     struct sk_buff *skb,
						     struct request_sock *req,
						     struct dst_entry *dst);

extern int			tcp_v4_do_rcv(struct sock *sk,
					      struct sk_buff *skb);

extern int			tcp_v4_connect(struct sock *sk,
					       struct sockaddr *uaddr,
					       int addr_len);

extern int			tcp_connect(struct sock *sk);

extern struct sk_buff *		tcp_make_synack(struct sock *sk,
						struct dst_entry *dst,
						struct request_sock *req);

extern int			tcp_disconnect(struct sock *sk, int flags);


/* From syncookies.c */
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);

extern __u32 cookie_init_timestamp(struct request_sock *req);
extern void cookie_check_timestamp(struct tcp_options_received *tcp_opt);

/* From net/ipv6/syncookies.c */
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);

/* tcp_output.c */

extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
				      int nonagle);
extern int tcp_may_send_now(struct sock *sk);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int  tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int  tcp_send_synack(struct sock *);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);

/* tcp_input.c */
extern void tcp_cwnd_application_limited(struct sock *sk);

/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}

extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	if (tp->max_window && pktsize > (tp->max_window >> 1))
		return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
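
/*
 * Worked example (illustrative): with max_window = 1000 and a 20-byte
 * header, pktsize = 1460 exceeds max_window >> 1 == 500 and is bounded
 * to max(500, 68 - 20) == 500; with max_window = 64000 the same pktsize
 * is returned unchanged.
 */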

/* tcp.c */
extern void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
				unsigned int, size_t);
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			 sk_read_actor_t recv_actor);

extern void tcp_initialize_rcv_mss(struct sock *sk);

extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
extern int tcp_mss_to_mtu(struct sock *sk, int mss);
extern void tcp_mtup_init(struct sock *sk);

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}
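
/*
 * Illustrative layout (derived from the expression above): pred_flags
 * is the expected 4th 32-bit word of the TCP header in host order --
 * data offset in the top nibble, the ACK bit at 0x00100000, the window
 * in the low 16 bits.  E.g. a 32-byte header (timestamps on) makes
 * tcp_header_len << 26 put doff = 8 into bits 28-31.
 */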

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (skb_queue_empty(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window allows.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}
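
/*
 * Worked example (illustrative): rcv_wup = 1000, rcv_wnd = 500 and
 * rcv_nxt = 1300 advertise a 200-byte window; if the peer has pushed
 * past the offer (rcv_nxt = 1600), win goes negative and is clamped
 * to zero.
 */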

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
extern u32	__tcp_select_window(struct sock *sk);

/* TCP timestamps are only 32-bits, this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below.  We decided
 * to use only the low 32-bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission
 * code.  We also store the host-order sequence numbers in
 * here too.  This is 36 bytes on 32-bit architectures,
 * 40 bytes on 64-bit machines, if this grows please adjust
 * skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames		*/
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	__u32		when;		/* used to compute rtt's	*/
	__u8		flags;		/* TCP header flags.		*/

	/* NOTE: These must match up to the flags byte in a
	 *       real TCP header.
	 */
#define TCPCB_FLAG_FIN		0x01
#define TCPCB_FLAG_SYN		0x02
#define TCPCB_FLAG_RST		0x04
#define TCPCB_FLAG_PSH		0x08
#define TCPCB_FLAG_ACK		0x10
#define TCPCB_FLAG_URG		0x20
#define TCPCB_FLAG_ECE		0x40
#define TCPCB_FLAG_CWR		0x80

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/

#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

	__u32		ack_seq;	/* Sequence number ACK'd	*/
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
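
/*
 * Usage sketch (illustrative, loosely mirroring the output path):
 *
 *	TCP_SKB_CB(skb)->seq     = tp->write_seq;
 *	TCP_SKB_CB(skb)->end_seq = tp->write_seq + skb->len;
 *	TCP_SKB_CB(skb)->flags   = TCPCB_FLAG_ACK;
 *
 * end_seq additionally counts one unit each for SYN and FIN, matching
 * the "SEQ + FIN + SYN + datalen" comment above.
 */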

/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_segs;
}

/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}
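
/*
 * Illustrative invariant: a TSO skb carrying N full-sized segments has
 * tcp_skb_pcount(skb) == N, and skb->len is roughly
 * tcp_skb_pcount(skb) * tcp_skb_mss(skb) (the last segment may be short).
 */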

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_FRTO,		/* fast recovery timeout */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_FAST_ACK,	/* in sequence ack */
	CA_EVENT_SLOW_ACK,	/* other ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CONG_NON_RESTRICTED 0x1
#define TCP_CONG_RTT_STAMP	0x2

struct tcp_congestion_ops {
	struct list_head	list;
	unsigned long flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* lower bound for congestion window (optional) */
	u32 (*min_cwnd)(const struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* new value of cwnd after loss (optional) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
	/* get info for inet_diag (optional) */
	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
};

extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

extern void tcp_init_congestion_control(struct sock *sk);
extern void tcp_cleanup_congestion_control(struct sock *sk);
extern int tcp_set_default_congestion_control(const char *name);
extern void tcp_get_default_congestion_control(char *name);
extern void tcp_get_available_congestion_control(char *buf, size_t len);
extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
extern int tcp_set_allowed_congestion_control(char *allowed);
extern int tcp_set_congestion_control(struct sock *sk, const char *name);
extern void tcp_slow_start(struct tcp_sock *tp);
extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);

extern struct tcp_congestion_ops tcp_init_congestion_ops;
extern u32 tcp_reno_ssthresh(struct sock *sk);
extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
extern u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;
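
/*
 * Minimal registration sketch (illustrative only; the "example" module
 * and its reuse of the Reno helpers are placeholders, not in-tree code).
 * Only ssthresh() and cong_avoid() are required hooks:
 *
 *	static struct tcp_congestion_ops tcp_example = {
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.min_cwnd	= tcp_reno_min_cwnd,
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init tcp_example_register(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_example);
 *	}
 *	module_init(tcp_example_register);
 *
 *	static void __exit tcp_example_unregister(void)
 *	{
 *		tcp_unregister_congestion_control(&tcp_example);
 *	}
 *	module_exit(tcp_example_unregister);
 */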

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 * tcp_is_fack - FACK enabled, implies SACK enabled
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok;
}

static inline int tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline int tcp_is_fack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok & 2;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= 2;
}
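
/*
 * Illustrative bit layout of rx_opt.sack_ok, as used above: bit 0 is
 * set once SACK is negotiated and bit 1 (value 2) marks FACK, so a
 * SACK+FACK flow has sack_ok == 3.
 */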

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
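
/*
 * Worked example (illustrative): packets_out = 10, sacked_out = 2,
 * lost_out = 1 and retrans_out = 1 give 10 - (2 + 1) + 1 = 8 packets
 * presumed in flight.
 */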

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is rate halving phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}
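
/*
 * Illustrative arithmetic: (snd_cwnd >> 1) + (snd_cwnd >> 2) is 3/4 of
 * cwnd, so outside CWR/Recovery the value returned never falls below
 * three quarters of the current congestion window.
 */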

/* Use a define here intentionally so the WARN_ON location shown is the caller's */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 801 |  | 
| Ilpo Järvinen | 3cfe3ba | 2007-02-27 10:09:49 -0800 | [diff] [blame] | 802 | extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 803 | extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst); | 
 | 804 |  | 
 | 805 | /* Slow start with delack produces 3 packets of burst, so that | 
| John Heffner | dd9e0dd | 2008-04-15 15:26:39 -0700 | [diff] [blame] | 806 |  * it is safe "de facto".  This will be the default - same as | 
 | 807 |  * the default reordering threshold - but if reordering increases, | 
 | 808 |  * we must be able to allow cwnd to burst at least this much in order | 
 | 809 |  * to not pull it back when holes are filled. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 810 |  */ | 
 | 811 | static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp) | 
 | 812 | { | 
| John Heffner | dd9e0dd | 2008-04-15 15:26:39 -0700 | [diff] [blame] | 813 | 	return tp->reordering; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 814 | } | 
 | 815 |  | 
| Ilpo Järvinen | 90840de | 2007-12-31 04:48:41 -0800 | [diff] [blame] | 816 | /* Returns end sequence number of the receiver's advertised window */ | 
 | 817 | static inline u32 tcp_wnd_end(const struct tcp_sock *tp) | 
 | 818 | { | 
 | 819 | 	return tp->snd_una + tp->snd_wnd; | 
 | 820 | } | 
| Ilpo Järvinen | cea14e0 | 2008-01-12 03:19:12 -0800 | [diff] [blame] | 821 | extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight); | 
| Stephen Hemminger | f4805ed | 2005-11-10 16:53:30 -0800 | [diff] [blame] | 822 |  | 
| Chuck Lever | c1bd24b | 2007-10-23 21:08:54 -0700 | [diff] [blame] | 823 | static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss, | 
| Stephen Hemminger | 40efc6f | 2006-01-03 16:03:49 -0800 | [diff] [blame] | 824 | 				       const struct sk_buff *skb) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 825 | { | 
 | 826 | 	if (skb->len < mss) | 
 | 827 | 		tp->snd_sml = TCP_SKB_CB(skb)->end_seq; | 
 | 828 | } | 
 | 829 |  | 
| Ilpo Järvinen | 9e412ba | 2007-04-20 22:18:02 -0700 | [diff] [blame] | 830 | static inline void tcp_check_probe_timer(struct sock *sk) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 831 | { | 
| Ilpo Järvinen | 9e412ba | 2007-04-20 22:18:02 -0700 | [diff] [blame] | 832 | 	struct tcp_sock *tp = tcp_sk(sk); | 
| Arnaldo Carvalho de Melo | 463c84b | 2005-08-09 20:10:42 -0700 | [diff] [blame] | 833 | 	const struct inet_connection_sock *icsk = inet_csk(sk); | 
| Ilpo Järvinen | 9e412ba | 2007-04-20 22:18:02 -0700 | [diff] [blame] | 834 |  | 
| Arnaldo Carvalho de Melo | 463c84b | 2005-08-09 20:10:42 -0700 | [diff] [blame] | 835 | 	if (!tp->packets_out && !icsk->icsk_pending) | 
| Arnaldo Carvalho de Melo | 3f421ba | 2005-08-09 20:11:08 -0700 | [diff] [blame] | 836 | 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, | 
 | 837 | 					  icsk->icsk_rto, TCP_RTO_MAX); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 838 | } | 
 | 839 |  | 
| Ilpo Järvinen | 9e412ba | 2007-04-20 22:18:02 -0700 | [diff] [blame] | 840 | static inline void tcp_push_pending_frames(struct sock *sk) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 841 | { | 
| Ilpo Järvinen | 9e412ba | 2007-04-20 22:18:02 -0700 | [diff] [blame] | 842 | 	struct tcp_sock *tp = tcp_sk(sk); | 
 | 843 |  | 
| Ilpo Järvinen | 0c54b85 | 2009-03-14 14:23:05 +0000 | [diff] [blame] | 844 | 	__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 845 | } | 
 | 846 |  | 
| Hantzis Fotis | ee7537b | 2009-03-02 22:42:02 -0800 | [diff] [blame] | 847 | static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 848 | { | 
 | 849 | 	tp->snd_wl1 = seq; | 
 | 850 | } | 
 | 851 |  | 
| Hantzis Fotis | ee7537b | 2009-03-02 22:42:02 -0800 | [diff] [blame] | 852 | static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 853 | { | 
 | 854 | 	tp->snd_wl1 = seq; | 
 | 855 | } | 

/*
 * Fold the IPv4 pseudo-header into a partial checksum to
 * calculate/check the TCP checksum.
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}
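
/*
 * Illustrative sketch (not a kernel API): how a transmit path might
 * fill in th->check in software, folding a csum_partial() over the TCP
 * header and payload into the pseudo-header sum.  The names below are
 * assumed local variables, not fields defined in this header:
 *
 *	th->check = 0;
 *	th->check = tcp_v4_check(len, saddr, daddr,
 *				 csum_partial(th, len, 0));
 */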

static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}

static inline int tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__tcp_checksum_complete(skb);
}

/* Prequeue for VJ style copy to user, combined with checksumming. */

static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	tp->ucopy.wakeup = 0;
	tp->ucopy.pinned_list = NULL;
	tp->ucopy.dma_cookie = 0;
#endif
}

/* The packet is added to the VJ-style prequeue for processing in process
 * context, if a reader task is waiting.  Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere.  Latency?  Burstiness?  Well, at least now we will
 * see why it failed.  8)8)				  --ANK
 *
 * NOTE: is this not too big to inline?
 */
static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return 0;

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_poll(sk->sk_sleep,
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return 1;
}
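
/*
 * Illustrative sketch (assumed caller, abridged): the softirq receive
 * path offers each segment to the prequeue first and only falls back
 * to full protocol processing when no reader is waiting:
 *
 *	if (!tcp_prequeue(sk, skb))
 *		ret = tcp_v4_do_rcv(sk, skb);
 */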


#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[] = {
	"Unused", "Established", "Syn Sent", "Syn Recv",
	"Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
	"Close Wait", "Last ACK", "Listen", "Closing"
};
#endif
extern void tcp_set_state(struct sock *sk, int state);

extern void tcp_done(struct sock *sk);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
				      __u32 *rcv_wnd, __u32 *window_clamp,
				      int wscale_ok, __u8 *rcv_wscale);

static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale <= 0 ?
		(space >> (-sysctl_tcp_adv_win_scale)) :
		space - (space >> sysctl_tcp_adv_win_scale);
}
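
/*
 * Worked example (illustrative): with the typical default
 * sysctl_tcp_adv_win_scale of 2, a buffer of 65536 bytes yields
 * 65536 - (65536 >> 2) = 49152 bytes of advertised window; the
 * remaining quarter is reserved for struct sk_buff and other
 * per-packet overhead.  A non-positive scale instead takes
 * space >> -scale as the window.
 */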

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}

static inline void tcp_openreq_init(struct request_sock *req,
				    struct tcp_options_received *rx_opt,
				    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->cookie_ts = 0;
	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	ireq->tstamp_ok = rx_opt->tstamp_ok;
	ireq->sack_ok = rx_opt->sack_ok;
	ireq->snd_wscale = rx_opt->snd_wscale;
	ireq->wscale_ok = rx_opt->wscale_ok;
	ireq->acked = 0;
	ireq->ecn_ok = 0;
	ireq->rmt_port = tcp_hdr(skb)->source;
	ireq->loc_port = tcp_hdr(skb)->dest;
}

extern void tcp_enter_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}
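
/*
 * Note (illustrative): the GCC "a ? : b" extension returns the
 * per-socket value when it is non-zero and otherwise falls back to the
 * system-wide sysctl.  Assuming the compiled-in defaults defined
 * elsewhere in this header (TCP_KEEPALIVE_TIME of two hours,
 * TCP_KEEPALIVE_INTVL of 75 seconds, both in jiffies), an unconfigured
 * idle socket probes after 7200 s and then every 75 s.
 */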

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}
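
/*
 * Worked arithmetic (illustrative): (rto << 2) - (rto >> 1) is
 * 4*RTO - RTO/2 = 3.5*RTO, so the FIN_WAIT_2 lingering time is clamped
 * to at least three and a half retransmission timeouts no matter how
 * low linger2 or tcp_fin_timeout is configured.
 */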

static inline int tcp_paws_check(const struct tcp_options_received *rx_opt,
				 int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return 1;
	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
		return 1;

	return 0;
}
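
/*
 * Worked example (illustrative): with ts_recent = 100 and an arriving
 * rcv_tsval = 99, the signed difference is 1 > paws_win = 0, so the
 * check fails and the segment is a PAWS suspect.  The (s32) cast keeps
 * the comparison correct across timestamp wraparound: ts_recent =
 * 0xfffffff0 versus rcv_tsval = 5 gives a negative difference, i.e. a
 * newer timestamp, and the check passes.  The second clause accepts
 * anything once ts_recent is more than TCP_PAWS_24DAYS old.
 */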

static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
				  int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return 0;

	/* RST segments are not recommended to carry timestamps,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, this is a mistake.  It is necessary to understand the
	   reasons for this constraint before relaxing it: if a peer reboots,
	   its clock may go out of sync and half-open connections will not
	   be reset.  Actually, the problem would not exist if all the
	   implementations followed the draft about maintaining clocks
	   across reboots.  Linux-2.2 DOES NOT!

	   However, we can relax the time bounds for RST segments to MSL.
	 */
	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return 0;
	return 1;
}

#define TCP_CHECK_TIMER(sk) do { } while (0)

static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN * 1000 / HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX * 1000 / HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
	tp->scoreboard_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tcp_clear_retrans_hints_partial(tp);
	tp->retransmit_skb_hint = NULL;
}

/* MD5 Signature */
struct crypto_hash;

/* - key database */
struct tcp_md5sig_key {
	u8			*key;
	u8			keylen;
};

struct tcp4_md5sig_key {
	struct tcp_md5sig_key	base;
	__be32			addr;
};

struct tcp6_md5sig_key {
	struct tcp_md5sig_key	base;
#if 0
	u32			scope_id;	/* XXX */
#endif
	struct in6_addr		addr;
};

/* - sock block */
struct tcp_md5sig_info {
	struct tcp4_md5sig_key	*keys4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp6_md5sig_key	*keys6;
	u32			entries6;
	u32			alloced6;
#endif
	u32			entries4;
	u32			alloced4;
};

/* - pseudo header */
struct tcp4_pseudohdr {
	__be32		saddr;
	__be32		daddr;
	__u8		pad;
	__u8		protocol;
	__be16		len;
};

struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp6_pseudohdr ip6;
#endif
};

/* - pool: digest algorithm, hash description and scratch buffer */
struct tcp_md5sig_pool {
	struct hash_desc	md5_desc;
	union tcp_md5sum_block	md5_blk;
};

#define TCP_MD5SIG_MAXKEYS	(~(u32)0)	/* really?! */

/* - functions */
extern int			tcp_v4_md5_hash_skb(char *md5_hash,
						    struct tcp_md5sig_key *key,
						    struct sock *sk,
						    struct request_sock *req,
						    struct sk_buff *skb);

extern struct tcp_md5sig_key	*tcp_v4_md5_lookup(struct sock *sk,
						   struct sock *addr_sk);

extern int			tcp_v4_md5_do_add(struct sock *sk,
						  __be32 addr,
						  u8 *newkey,
						  u8 newkeylen);

extern int			tcp_v4_md5_do_del(struct sock *sk,
						  __be32 addr);

#ifdef CONFIG_TCP_MD5SIG
#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_keylen ?		 \
				 &(struct tcp_md5sig_key) {		 \
					.key = (twsk)->tw_md5_key,	 \
					.keylen = (twsk)->tw_md5_keylen, \
				} : NULL)
#else
#define tcp_twsk_md5_key(twsk)	NULL
#endif

extern struct tcp_md5sig_pool	**tcp_alloc_md5sig_pool(void);
extern void			tcp_free_md5sig_pool(void);

extern struct tcp_md5sig_pool	*__tcp_get_md5sig_pool(int cpu);
extern void			__tcp_put_md5sig_pool(void);
extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
				 unsigned int header_len);
extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
			    struct tcp_md5sig_key *key);

static inline struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
{
	int cpu = get_cpu();
	struct tcp_md5sig_pool *ret = __tcp_get_md5sig_pool(cpu);

	if (!ret)
		put_cpu();
	return ret;
}

static inline void tcp_put_md5sig_pool(void)
{
	__tcp_put_md5sig_pool();
	put_cpu();
}
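
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel
 * API): the intended calling sequence for the pool helpers above,
 * mirroring what an af-specific MD5 hash routine is expected to do.
 * Error handling is abridged, and a real caller must also hash the
 * af-specific pseudo-header between crypto_hash_init() and
 * tcp_md5_hash_header().
 */
static inline int tcp_md5_hash_sketch(char *md5_hash,
				      struct tcp_md5sig_key *key,
				      struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();

	if (!hp)
		return 1;
	if (crypto_hash_init(&hp->md5_desc) ||
	    tcp_md5_hash_header(hp, th) ||	/* hashes with check zeroed */
	    tcp_md5_hash_skb_data(hp, skb, th->doff << 2) ||
	    tcp_md5_hash_key(hp, key) ||
	    crypto_hash_final(&hp->md5_desc, (u8 *)md5_hash)) {
		tcp_put_md5sig_pool();
		return 1;
	}
	tcp_put_md5sig_pool();
	return 0;
}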

/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_wmem_free_skb(sk, skb);
	sk_mem_reclaim(sk);
}

static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
{
	return skb_queue_next(&sk->sk_write_queue, skb);
}

static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_buff *skb)
{
	return skb_queue_prev(&sk->sk_write_queue, skb);
}

#define tcp_for_write_queue(skb, sk)					\
	skb_queue_walk(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from(skb, sk)				\
	skb_queue_walk_from(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)

static inline struct sk_buff *tcp_send_head(struct sock *sk)
{
	return sk->sk_send_head;
}

static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}
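
/*
 * Illustrative sketch (hypothetical helper): the write queue holds both
 * sent-but-unacked skbs and not-yet-sent skbs, with tcp_send_head()
 * marking the boundary.  Walking from the send head to the tail
 * therefore visits exactly the unsent part:
 */
static inline unsigned int tcp_unsent_skbs_sketch(struct sock *sk)
{
	struct sk_buff *skb = tcp_send_head(sk);
	unsigned int n = 0;

	if (!skb)
		return 0;	/* everything queued has been sent */
	tcp_for_write_queue_from(skb, sk)
		n++;
	return n;
}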

static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
{
	if (tcp_skb_is_last(sk, skb))
		sk->sk_send_head = NULL;
	else
		sk->sk_send_head = tcp_write_queue_next(sk, skb);
}

static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
	if (sk->sk_send_head == skb_unlinked)
		sk->sk_send_head = NULL;
}

static inline void tcp_init_send_head(struct sock *sk)
{
	sk->sk_send_head = NULL;
}

static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__tcp_add_write_queue_tail(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL) {
		sk->sk_send_head = skb;

		if (tcp_sk(sk)->highest_sack == NULL)
			tcp_sk(sk)->highest_sack = skb;
	}
}

static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_head(&sk->sk_write_queue, skb);
}

/* Insert buff after skb on the write queue of sk.  */
static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
						struct sk_buff *buff,
						struct sock *sk)
{
	__skb_queue_after(&sk->sk_write_queue, skb, buff);
}

/* Insert new before skb on the write queue of sk.  */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						  struct sk_buff *skb,
						  struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);

	if (sk->sk_send_head == skb)
		sk->sk_send_head = new;
}

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	__skb_unlink(skb, &sk->sk_write_queue);
}

static inline int tcp_write_queue_empty(struct sock *sk)
{
	return skb_queue_empty(&sk->sk_write_queue);
}

/* Start sequence of the highest skb with the SACKed bit set; valid only
 * if sacked_out > 0 or when the caller has ensured validity itself.
 */
static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
	if (!tp->sacked_out)
		return tp->snd_una;

	if (tp->highest_sack == NULL)
		return tp->snd_nxt;

	return TCP_SKB_CB(tp->highest_sack)->seq;
}

static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
	tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
						tcp_write_queue_next(sk, skb);
}

static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
{
	return tcp_sk(sk)->highest_sack;
}

static inline void tcp_highest_sack_reset(struct sock *sk)
{
	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
}

/* Called when old skb is about to be deleted (to be combined with new skb) */
static inline void tcp_highest_sack_combine(struct sock *sk,
					    struct sk_buff *old,
					    struct sk_buff *new)
{
	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
		tcp_sk(sk)->highest_sack = new;
}

/* /proc */
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_OPENREQ,
	TCP_SEQ_STATE_ESTABLISHED,
	TCP_SEQ_STATE_TIME_WAIT,
};

struct tcp_seq_afinfo {
	char			*name;
	sa_family_t		family;
	struct file_operations	seq_fops;
	struct seq_operations	seq_ops;
};

struct tcp_iter_state {
	struct seq_net_private	p;
	sa_family_t		family;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, sbucket, num, uid;
};

extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);

extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

extern void tcp_v4_destroy_sock(struct sock *sk);

extern int tcp_v4_gso_send_check(struct sk_buff *skb);
extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
					struct sk_buff *skb);
extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb);
extern int tcp_gro_complete(struct sk_buff *skb);
extern int tcp4_gro_complete(struct sk_buff *skb);

#ifdef CONFIG_PROC_FS
extern int  tcp4_proc_init(void);
extern void tcp4_proc_exit(void);
#endif

/* TCP af-specific functions */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct sock *addr_sk);
	int			(*calc_md5_hash) (char *location,
						  struct tcp_md5sig_key *md5,
						  struct sock *sk,
						  struct request_sock *req,
						  struct sk_buff *skb);
	int			(*md5_add) (struct sock *sk,
					    struct sock *addr_sk,
					    u8 *newkey,
					    u8 len);
	int			(*md5_parse) (struct sock *sk,
					      char __user *optval,
					      int optlen);
#endif
};

struct tcp_request_sock_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct request_sock *req);
#endif
};

extern void tcp_v4_init(void);
extern void tcp_init(void);

#endif	/* _TCP_H */