/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Definitions for the TCP module.
 *
 * Version:     @(#)tcp.h       1.0.5   05/23/93
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define TCP_DEBUG 1
#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/dmaengine.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>

#include <linux/seq_file.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER          (128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE    40

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW          32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS             88U

/* Minimal RCV_MSS. */
#define TCP_MIN_RCVMSS          536U

/* The least MTU to use for probing */
#define TCP_BASE_MSS            512

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH  3

/* Maximal reordering. */
#define TCP_MAX_REORDERING      127

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS       16U

/* urg_data states */
#define TCP_URG_VALID   0x0100
#define TCP_URG_NOTYET  0x0200
#define TCP_URG_READ    0x0400

#define TCP_RETR1       3       /*
                                 * This is how many retries it does before it
                                 * tries to figure out if the gateway is
                                 * down. Minimal RFC value is 3; it corresponds
                                 * to ~3sec-8min depending on RTO.
                                 */

#define TCP_RETR2       15      /*
                                 * This should take at least
                                 * 90 minutes to time out.
                                 * RFC1122 says that the limit is 100 sec.
                                 * 15 is ~13-30min depending on RTO.
                                 */

#define TCP_SYN_RETRIES  5      /* number of times to retry active opening a
                                 * connection: ~180sec is RFC minimum */

#define TCP_SYNACK_RETRIES 5    /* number of times to retry passive opening a
                                 * connection: ~180sec is RFC minimum */


#define TCP_ORPHAN_RETRIES 7    /* number of times to retry on an orphaned
                                 * socket. 7 is ~50sec-16min.
                                 */


#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
                                  * state, about 60 seconds     */
#define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN
                                 /* BSD style FIN_WAIT2 deadlock breaker.
                                  * It used to be 3min, new value is 60sec,
                                  * to combine FIN-WAIT-2 timeout with
                                  * TIME-WAIT timer.
                                  */

#define TCP_DELACK_MAX  ((unsigned)(HZ/5))      /* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN  ((unsigned)(HZ/25))     /* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN     ((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN  4U
#define TCP_ATO_MIN     4U
#endif
#define TCP_RTO_MAX     ((unsigned)(120*HZ))
#define TCP_RTO_MIN     ((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(3*HZ))     /* RFC 1122 initial RTO value */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
                                                         * for local resources.
                                                         */

#define TCP_KEEPALIVE_TIME      (120*60*HZ)     /* two hours */
#define TCP_KEEPALIVE_PROBES    9               /* Max of 9 keepalive probes */
#define TCP_KEEPALIVE_INTVL     (75*HZ)

#define MAX_TCP_KEEPIDLE        32767
#define MAX_TCP_KEEPINTVL       32767
#define MAX_TCP_KEEPCNT         127
#define MAX_TCP_SYNCNT          127

#define TCP_SYNQ_INTERVAL       (HZ/5)  /* Period of SYNACK timer */

#define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)
#define TCP_PAWS_MSL    60              /* Per-host timestamps are invalidated
                                         * after this time. It should be equal
                                         * (or greater than) TCP_TIMEWAIT_LEN
                                         * to provide reliability equal to one
                                         * provided by timewait state.
                                         */
#define TCP_PAWS_WINDOW 1               /* Replay window for per-host
                                         * timestamps. It must be less than
                                         * minimal timewait lifetime.
                                         */
/*
 *      TCP option
 */

#define TCPOPT_NOP              1       /* Padding */
#define TCPOPT_EOL              0       /* End of options */
#define TCPOPT_MSS              2       /* Segment size negotiating */
#define TCPOPT_WINDOW           3       /* Window scaling */
#define TCPOPT_SACK_PERM        4       /* SACK Permitted */
#define TCPOPT_SACK             5       /* SACK Block */
#define TCPOPT_TIMESTAMP        8       /* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG           19      /* MD5 Signature (RFC2385) */

/*
 *      TCP option lengths
 */

#define TCPOLEN_MSS             4
#define TCPOLEN_WINDOW          3
#define TCPOLEN_SACK_PERM       2
#define TCPOLEN_TIMESTAMP       10
#define TCPOLEN_MD5SIG          18

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED          12
#define TCPOLEN_WSCALE_ALIGNED          4
#define TCPOLEN_SACKPERM_ALIGNED        4
#define TCPOLEN_SACK_BASE               2
#define TCPOLEN_SACK_BASE_ALIGNED       4
#define TCPOLEN_SACK_PERBLOCK           8
#define TCPOLEN_MD5SIG_ALIGNED          20
#define TCPOLEN_MSS_ALIGNED             4

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF           1       /* Nagle's algo is disabled */
#define TCP_NAGLE_CORK          2       /* Socket is corked */
#define TCP_NAGLE_PUSH          4       /* Cork is overridden for already queued data */

extern struct inet_timewait_death_row tcp_death_row;

/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern int sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_frto_response;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_abc;
extern int sysctl_tcp_mtu_probing;
extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_max_ssthresh;

extern atomic_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;

/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline int before(__u32 seq1, __u32 seq2)
{
        return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1)       before(seq1, seq2)

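/* Worked example (illustrative): the signed-difference trick stays
 * correct across sequence number wrap.  With seq1 = 0xfffffff0 just
 * below the wrap point and seq2 = 0x00000010 just past it,
 * seq1 - seq2 = 0xffffffe0, which as __s32 is -32, so
 * before(0xfffffff0, 0x00000010) correctly reports seq1 as older
 * even though its unsigned value is numerically larger.
 */
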
/* is s2<=s1<=s3 ? */
static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
        return seq3 - seq2 >= seq1 - seq2;
}

static inline int tcp_too_many_orphans(struct sock *sk, int num)
{
        return (num > sysctl_tcp_max_orphans) ||
                (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
                 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]);
}

/* syncookies: remember time of last synqueue overflow */
static inline void tcp_synq_overflow(struct sock *sk)
{
        tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
{
        unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
        return time_after(jiffies, last_overflow + TCP_TIMEOUT_INIT);
}

extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)       SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_INC_STATS_BH(net, field)    SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)       SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)

extern void tcp_v4_err(struct sk_buff *skb, u32);

extern void tcp_shutdown (struct sock *sk, int how);

extern int tcp_v4_rcv(struct sk_buff *skb);

extern int tcp_v4_remember_stamp(struct sock *sk);

extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);

extern int tcp_sendmsg(struct kiocb *iocb, struct socket *sock,
                       struct msghdr *msg, size_t size);
extern ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags);

extern int tcp_ioctl(struct sock *sk,
                     int cmd,
                     unsigned long arg);

extern int tcp_rcv_state_process(struct sock *sk,
                                 struct sk_buff *skb,
                                 struct tcphdr *th,
                                 unsigned len);

extern int tcp_rcv_established(struct sock *sk,
                               struct sk_buff *skb,
                               struct tcphdr *th,
                               unsigned len);

extern void tcp_rcv_space_adjust(struct sock *sk);

extern void tcp_cleanup_rbuf(struct sock *sk, int copied);

extern int tcp_twsk_unique(struct sock *sk,
                           struct sock *sktw, void *twp);

extern void tcp_twsk_destructor(struct sock *sk);

extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
                               struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk,
                                         const unsigned int pkts)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_ack.quick) {
                if (pkts >= icsk->icsk_ack.quick) {
                        icsk->icsk_ack.quick = 0;
                        /* Leaving quickack mode we deflate ATO. */
                        icsk->icsk_ack.ato = TCP_ATO_MIN;
                } else
                        icsk->icsk_ack.quick -= pkts;
        }
}

extern void tcp_enter_quickack_mode(struct sock *sk);

static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
{
        rx_opt->tstamp_ok = rx_opt->sack_ok = rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
}

#define TCP_ECN_OK              1
#define TCP_ECN_QUEUE_CWR       2
#define TCP_ECN_DEMAND_CWR      4

static __inline__ void
TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
{
        if (sysctl_tcp_ecn && th->ece && th->cwr)
                inet_rsk(req)->ecn_ok = 1;
}

enum tcp_tw_status
{
        TCP_TW_SUCCESS = 0,
        TCP_TW_RST = 1,
        TCP_TW_ACK = 2,
        TCP_TW_SYN = 3
};


extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
                                                     struct sk_buff *skb,
                                                     const struct tcphdr *th);

extern struct sock * tcp_check_req(struct sock *sk, struct sk_buff *skb,
                                   struct request_sock *req,
                                   struct request_sock **prev);
extern int tcp_child_process(struct sock *parent,
                             struct sock *child,
                             struct sk_buff *skb);
extern int tcp_use_frto(struct sock *sk);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
extern void tcp_clear_retrans(struct tcp_sock *tp);
extern void tcp_update_metrics(struct sock *sk);

extern void tcp_close(struct sock *sk,
                      long timeout);
extern unsigned int tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);

extern int tcp_getsockopt(struct sock *sk, int level,
                          int optname,
                          char __user *optval,
                          int __user *optlen);
extern int tcp_setsockopt(struct sock *sk, int level,
                          int optname, char __user *optval,
                          int optlen);
extern int compat_tcp_getsockopt(struct sock *sk,
                                 int level, int optname,
                                 char __user *optval, int __user *optlen);
extern int compat_tcp_setsockopt(struct sock *sk,
                                 int level, int optname,
                                 char __user *optval, int optlen);
extern void tcp_set_keepalive(struct sock *sk, int val);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
                       struct msghdr *msg,
                       size_t len, int nonblock,
                       int flags, int *addr_len);

extern void tcp_parse_options(struct sk_buff *skb,
                              struct tcp_options_received *opt_rx,
                              int estab);

extern u8 *tcp_parse_md5sig_option(struct tcphdr *th);

/*
 *      TCP v4 functions exported for the inet6 API
 */

extern void tcp_v4_send_check(struct sock *sk, int len,
                              struct sk_buff *skb);

extern int tcp_v4_conn_request(struct sock *sk,
                               struct sk_buff *skb);

extern struct sock * tcp_create_openreq_child(struct sock *sk,
                                              struct request_sock *req,
                                              struct sk_buff *skb);

extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk,
                                          struct sk_buff *skb,
                                          struct request_sock *req,
                                          struct dst_entry *dst);

extern int tcp_v4_do_rcv(struct sock *sk,
                         struct sk_buff *skb);

extern int tcp_v4_connect(struct sock *sk,
                          struct sockaddr *uaddr,
                          int addr_len);

extern int tcp_connect(struct sock *sk);

extern struct sk_buff * tcp_make_synack(struct sock *sk,
                                        struct dst_entry *dst,
                                        struct request_sock *req);

extern int tcp_disconnect(struct sock *sk, int flags);


/* From syncookies.c */
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
                                    struct ip_options *opt);
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
                                     __u16 *mss);

extern __u32 cookie_init_timestamp(struct request_sock *req);
extern void cookie_check_timestamp(struct tcp_options_received *tcp_opt);

/* From net/ipv6/syncookies.c */
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb,
                                     __u16 *mss);

/* tcp_output.c */

extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
                                      int nonagle);
extern int tcp_may_send_now(struct sock *sk);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);

/* tcp_input.c */
extern void tcp_cwnd_application_limited(struct sock *sk);

/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
        inet_csk_clear_xmit_timers(sk);
}

extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
        if (tp->max_window && pktsize > (tp->max_window >> 1))
                return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
        else
                return pktsize;
}

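/* Worked example (illustrative): with tp->max_window = 65535, a 48KB
 * TSO packet exceeds the half-window of 32767 and is bounded to
 * max(32767, 68U - tp->tcp_header_len) = 32767, while a 16KB packet
 * passes through unchanged.  The 68U - tp->tcp_header_len floor keeps
 * a minimal usable packet size even if the peer ever advertised a
 * pathologically small maximal window.
 */
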
/* tcp.c */
extern void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
                               unsigned int, size_t);
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
                         sk_read_actor_t recv_actor);

extern void tcp_initialize_rcv_mss(struct sock *sk);

extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
extern int tcp_mss_to_mtu(struct sock *sk, int mss);
extern void tcp_mtup_init(struct sock *sk);

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
        tp->pred_flags = htonl((tp->tcp_header_len << 26) |
                               ntohl(TCP_FLAG_ACK) |
                               snd_wnd);
}

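/* Illustrative note: pred_flags caches the expected 4th 32-bit word
 * of the TCP header (data offset, flags, window) for receive-side
 * header prediction.  Since tcp_header_len is in bytes,
 * tcp_header_len << 26 equals (tcp_header_len / 4) << 28, placing
 * the data-offset nibble in the top four bits; the ACK flag sits
 * below it and the expected raw window fills the low 16 bits.
 */
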
static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
        __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (skb_queue_empty(&tp->out_of_order_queue) &&
            tp->rcv_wnd &&
            atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
            !tp->urg_data)
                tcp_fast_path_on(tp);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
        s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

        if (win < 0)
                win = 0;
        return (u32) win;
}

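/* Worked example (illustrative): with rcv_wup = 1000, rcv_wnd = 4000
 * and rcv_nxt = 4500, the advertised right edge is 5000 and the
 * window still open to the peer is 5000 - 4500 = 500 bytes.  Had the
 * peer pushed past the edge (say rcv_nxt = 5200), win would go
 * negative and is clamped to 0 instead of wrapping.
 */
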
/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result. The caller does these things
 * if necessary. This is a "raw" window selection.
 */
extern u32 __tcp_select_window(struct sock *sk);

/* TCP timestamps are only 32-bits, this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below. We decided
 * to use only the low 32-bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp          ((__u32)(jiffies))

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission
 * code. We also store the host-order sequence numbers in
 * here too. This is 36 bytes on 32-bit architectures,
 * 40 bytes on 64-bit machines, if this grows please adjust
 * skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
        union {
                struct inet_skb_parm    h4;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
                struct inet6_skb_parm   h6;
#endif
        } header;       /* For incoming frames          */
        __u32           seq;            /* Starting sequence number     */
        __u32           end_seq;        /* SEQ + FIN + SYN + datalen    */
        __u32           when;           /* used to compute rtt's        */
        __u8            flags;          /* TCP header flags.            */

        /* NOTE: These must match up to the flags byte in a
         *       real TCP header.
         */
#define TCPCB_FLAG_FIN          0x01
#define TCPCB_FLAG_SYN          0x02
#define TCPCB_FLAG_RST          0x04
#define TCPCB_FLAG_PSH          0x08
#define TCPCB_FLAG_ACK          0x10
#define TCPCB_FLAG_URG          0x20
#define TCPCB_FLAG_ECE          0x40
#define TCPCB_FLAG_CWR          0x80

        __u8            sacked;         /* State flags for SACK/FACK.   */
#define TCPCB_SACKED_ACKED      0x01    /* SKB ACK'd by a SACK block    */
#define TCPCB_SACKED_RETRANS    0x02    /* SKB retransmitted            */
#define TCPCB_LOST              0x04    /* SKB is lost                  */
#define TCPCB_TAGBITS           0x07    /* All tag bits                 */

#define TCPCB_EVER_RETRANS      0x80    /* Ever retransmitted frame     */
#define TCPCB_RETRANS           (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

        __u32           ack_seq;        /* Sequence number ACK'd        */
};

#define TCP_SKB_CB(__skb)       ((struct tcp_skb_cb *)&((__skb)->cb[0]))

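/* Typical use (illustrative sketch): the cast macro exposes the
 * per-packet state stashed in skb->cb[], e.g. when scanning the
 * retransmit queue:
 *
 *      struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 *
 *      if (!(tcb->sacked & TCPCB_RETRANS) &&
 *          before(tcb->seq, tp->snd_una))
 *              ... the segment was acked without retransmission ...
 */
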
/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
        return skb_shinfo(skb)->gso_segs;
}

/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
        return skb_shinfo(skb)->gso_size;
}

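/* Relationship (illustrative): a TSO skb carrying 8192 bytes of data
 * with gso_size 1448 reports tcp_skb_pcount() == 6 logical packets
 * (DIV_ROUND_UP(8192, 1448)), and tcp_skb_mss() returns the 1448-byte
 * segment size used to cut it on the wire.  Counters such as
 * tp->packets_out are kept in these pcount units, not in skbs.
 */
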
/* Events passed to congestion control interface */
enum tcp_ca_event {
        CA_EVENT_TX_START,      /* first transmit when no packets in flight */
        CA_EVENT_CWND_RESTART,  /* congestion window restart */
        CA_EVENT_COMPLETE_CWR,  /* end of congestion recovery */
        CA_EVENT_FRTO,          /* fast recovery timeout */
        CA_EVENT_LOSS,          /* loss timeout */
        CA_EVENT_FAST_ACK,      /* in sequence ack */
        CA_EVENT_SLOW_ACK,      /* other ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX 16
#define TCP_CA_MAX      128
#define TCP_CA_BUF_MAX  (TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CONG_NON_RESTRICTED 0x1
#define TCP_CONG_RTT_STAMP      0x2

struct tcp_congestion_ops {
        struct list_head        list;
        unsigned long flags;

        /* initialize private data (optional) */
        void (*init)(struct sock *sk);
        /* cleanup private data (optional) */
        void (*release)(struct sock *sk);

        /* return slow start threshold (required) */
        u32 (*ssthresh)(struct sock *sk);
        /* lower bound for congestion window (optional) */
        u32 (*min_cwnd)(const struct sock *sk);
        /* do new cwnd calculation (required) */
        void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
        /* call before changing ca_state (optional) */
        void (*set_state)(struct sock *sk, u8 new_state);
        /* call when cwnd event occurs (optional) */
        void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
        /* new value of cwnd after loss (optional) */
        u32  (*undo_cwnd)(struct sock *sk);
        /* hook for packet ack accounting (optional) */
        void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
        /* get info for inet_diag (optional) */
        void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);

        char            name[TCP_CA_NAME_MAX];
        struct module   *owner;
};

extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

extern void tcp_init_congestion_control(struct sock *sk);
extern void tcp_cleanup_congestion_control(struct sock *sk);
extern int tcp_set_default_congestion_control(const char *name);
extern void tcp_get_default_congestion_control(char *name);
extern void tcp_get_available_congestion_control(char *buf, size_t len);
extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
extern int tcp_set_allowed_congestion_control(char *allowed);
extern int tcp_set_congestion_control(struct sock *sk, const char *name);
extern void tcp_slow_start(struct tcp_sock *tp);
extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);

extern struct tcp_congestion_ops tcp_init_congestion_ops;
extern u32 tcp_reno_ssthresh(struct sock *sk);
extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
extern u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;

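/* Minimal sketch (illustrative, hypothetical "example" module): only
 * ->ssthresh and ->cong_avoid are required; everything else is
 * optional.  A Reno-behaving handler could be registered as:
 *
 *      static struct tcp_congestion_ops tcp_example __read_mostly = {
 *              .flags          = TCP_CONG_NON_RESTRICTED,
 *              .name           = "example",
 *              .owner          = THIS_MODULE,
 *              .ssthresh       = tcp_reno_ssthresh,
 *              .cong_avoid     = tcp_reno_cong_avoid,
 *              .min_cwnd       = tcp_reno_min_cwnd,
 *      };
 *
 *      static int __init tcp_example_register(void)
 *      {
 *              return tcp_register_congestion_control(&tcp_example);
 *      }
 *
 * Once registered, the handler can be chosen system-wide via
 * tcp_set_default_congestion_control() or per socket through the
 * TCP_CONGESTION socket option.
 */
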
static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_ca_ops->set_state)
                icsk->icsk_ca_ops->set_state(sk, ca_state);
        icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_ca_ops->cwnd_event)
                icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 * tcp_is_fack - FACK enabled, implies SACK enabled
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
        return tp->rx_opt.sack_ok;
}

static inline int tcp_is_reno(const struct tcp_sock *tp)
{
        return !tcp_is_sack(tp);
}

static inline int tcp_is_fack(const struct tcp_sock *tp)
{
        return tp->rx_opt.sack_ok & 2;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
        tp->rx_opt.sack_ok |= 2;
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
        return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *      "Packets sent once on transmission queue" MINUS
 *      "Packets left network, but not honestly ACKed yet" PLUS
 *      "Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
        return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}

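/* Worked example (illustrative): with packets_out = 100,
 * sacked_out = 10, lost_out = 5 and retrans_out = 3,
 * tcp_left_out() is 15 and tcp_packets_in_flight() is
 * 100 - 15 + 3 = 88 segments presumed still in the network.
 */
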
/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is rate halving phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
                return tp->snd_ssthresh;
        else
                return max(tp->snd_ssthresh,
                           ((tp->snd_cwnd >> 1) +
                            (tp->snd_cwnd >> 2)));
}

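/* Worked example (illustrative): outside of CWR/Recovery, with
 * snd_cwnd = 100 and snd_ssthresh = 60 this returns
 * max(60, (100 >> 1) + (100 >> 2)) = 75, i.e. three quarters of
 * cwnd, half-way between the usual ssthresh of cwnd/2 and cwnd
 * itself.  Inside CWR/Recovery the stored ssthresh is returned
 * unmodified.
 */
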
/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)

extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);

/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto". This will be the default - same as
 * the default reordering threshold - but if reordering increases,
 * we must be able to allow cwnd to burst at least this much in order
 * to not pull it back when holes are filled.
 */
static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
        return tp->reordering;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
        return tp->snd_una + tp->snd_wnd;
}
extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);

static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
                                       const struct sk_buff *skb)
{
        if (skb->len < mss)
                tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);

        if (!tp->packets_out && !icsk->icsk_pending)
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
                                          icsk->icsk_rto, TCP_RTO_MAX);
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
        tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
        tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
                                   __be32 daddr, __wsum base)
{
        return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
}

static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
        return __skb_checksum_complete(skb);
}

static inline int tcp_checksum_complete(struct sk_buff *skb)
{
        return !skb_csum_unnecessary(skb) &&
                __tcp_checksum_complete(skb);
}

/* Prequeue for VJ style copy to user, combined with checksumming. */

static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
        tp->ucopy.task = NULL;
        tp->ucopy.len = 0;
        tp->ucopy.memory = 0;
        skb_queue_head_init(&tp->ucopy.prequeue);
#ifdef CONFIG_NET_DMA
        tp->ucopy.dma_chan = NULL;
        tp->ucopy.wakeup = 0;
        tp->ucopy.pinned_list = NULL;
        tp->ucopy.dma_cookie = 0;
#endif
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see why it failed. 8)8) --ANK
 *
 * NOTE: is this not too big to inline?
 */
static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (sysctl_tcp_low_latency || !tp->ucopy.task)
                return 0;

        __skb_queue_tail(&tp->ucopy.prequeue, skb);
        tp->ucopy.memory += skb->truesize;
        if (tp->ucopy.memory > sk->sk_rcvbuf) {
                struct sk_buff *skb1;

                BUG_ON(sock_owned_by_user(sk));

                while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
                        sk_backlog_rcv(sk, skb1);
                        NET_INC_STATS_BH(sock_net(sk),
                                         LINUX_MIB_TCPPREQUEUEDROPPED);
                }

                tp->ucopy.memory = 0;
        } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
                wake_up_interruptible(sk->sk_sleep);
                if (!inet_csk_ack_scheduled(sk))
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  (3 * TCP_RTO_MIN) / 4,
                                                  TCP_RTO_MAX);
        }
        return 1;
}


#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[]={
        "Unused","Established","Syn Sent","Syn Recv",
        "Fin Wait 1","Fin Wait 2","Time Wait", "Close",
        "Close Wait","Last ACK","Listen","Closing"
};
#endif
extern void tcp_set_state(struct sock *sk, int state);

extern void tcp_done(struct sock *sk);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
        rx_opt->dsack = 0;
        rx_opt->num_sacks = 0;
}

/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
                                      __u32 *rcv_wnd, __u32 *window_clamp,
                                      int wscale_ok, __u8 *rcv_wscale);

static inline int tcp_win_from_space(int space)
{
        return sysctl_tcp_adv_win_scale<=0 ?
                (space>>(-sysctl_tcp_adv_win_scale)) :
                space - (space>>sysctl_tcp_adv_win_scale);
}

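/* Worked example (illustrative): with the usual
 * sysctl_tcp_adv_win_scale = 2, a 16384-byte space yields
 * 16384 - (16384 >> 2) = 12288 bytes of advertisable window; the
 * remaining quarter is held back as application/overhead reserve.
 * A non-positive scale would instead advertise space >> (-scale).
 */
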
/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
        return tcp_win_from_space(sk->sk_rcvbuf -
                                  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
        return tcp_win_from_space(sk->sk_rcvbuf);
}

static inline void tcp_openreq_init(struct request_sock *req,
                                    struct tcp_options_received *rx_opt,
                                    struct sk_buff *skb)
{
        struct inet_request_sock *ireq = inet_rsk(req);

        req->rcv_wnd = 0;               /* So that tcp_send_synack() knows! */
        req->cookie_ts = 0;
        tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
        req->mss = rx_opt->mss_clamp;
        req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
        ireq->tstamp_ok = rx_opt->tstamp_ok;
        ireq->sack_ok = rx_opt->sack_ok;
        ireq->snd_wscale = rx_opt->snd_wscale;
        ireq->wscale_ok = rx_opt->wscale_ok;
        ireq->acked = 0;
        ireq->ecn_ok = 0;
        ireq->rmt_port = tcp_hdr(skb)->source;
        ireq->loc_port = tcp_hdr(skb)->dest;
}

extern void tcp_enter_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
        return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
        return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int tcp_fin_time(const struct sock *sk)
{
        int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
        const int rto = inet_csk(sk)->icsk_rto;

        if (fin_timeout < (rto << 2) - (rto >> 1))
                fin_timeout = (rto << 2) - (rto >> 1);

        return fin_timeout;
}

static inline int tcp_paws_check(const struct tcp_options_received *rx_opt,
                                 int paws_win)
{
        if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
                return 1;
        if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
                return 1;

        return 0;
}

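/* Worked example (illustrative): ts_recent = 5000 and an arriving
 * rcv_tsval = 4990 give (s32)(5000 - 4990) = 10, which is > 0, so
 * with paws_win = 0 the segment looks like a replay and the check
 * fails; it still passes if ts_recent_stamp is more than 24 days
 * old, because a PAWS state that stale may have wrapped and proves
 * nothing.
 */
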
static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
                                  int rst)
{
        if (tcp_paws_check(rx_opt, 0))
                return 0;

        /* RST segments are not recommended to carry timestamp,
           and, if they do, it is recommended to ignore PAWS because
           "their cleanup function should take precedence over timestamps."
           Certainly, it is a mistake. It is necessary to understand the
           reasons of this constraint to relax it: if peer reboots, clock
           may go out-of-sync and half-open connections will not be reset.
           Actually, the problem would not exist if all the implementations
           followed the draft about maintaining clock via reboots.
           Linux-2.2 DOES NOT!

           However, we can relax time bounds for RST segments to MSL.
         */
        if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
                return 0;
        return 1;
}

#define TCP_CHECK_TIMER(sk) do { } while (0)

static inline void tcp_mib_init(struct net *net)
{
        /* See RFC 2012 */
        TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
        TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
        TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
        TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
        tp->lost_skb_hint = NULL;
        tp->scoreboard_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
        tcp_clear_retrans_hints_partial(tp);
        tp->retransmit_skb_hint = NULL;
}

/* MD5 Signature */
struct crypto_hash;

/* - key database */
struct tcp_md5sig_key {
        u8                      *key;
        u8                      keylen;
};

struct tcp4_md5sig_key {
        struct tcp_md5sig_key   base;
        __be32                  addr;
};

struct tcp6_md5sig_key {
        struct tcp_md5sig_key   base;
#if 0
        u32                     scope_id;       /* XXX */
#endif
        struct in6_addr         addr;
};

/* - sock block */
struct tcp_md5sig_info {
        struct tcp4_md5sig_key  *keys4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        struct tcp6_md5sig_key  *keys6;
        u32                     entries6;
        u32                     alloced6;
#endif
        u32                     entries4;
        u32                     alloced4;
};

/* - pseudo header */
struct tcp4_pseudohdr {
        __be32          saddr;
        __be32          daddr;
        __u8            pad;
        __u8            protocol;
        __be16          len;
};

struct tcp6_pseudohdr {
        struct in6_addr saddr;
        struct in6_addr daddr;
        __be32          len;
        __be32          protocol;       /* including padding */
};

union tcp_md5sum_block {
        struct tcp4_pseudohdr ip4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        struct tcp6_pseudohdr ip6;
#endif
};

/* - pool: digest algorithm, hash description and scratch buffer */
struct tcp_md5sig_pool {
        struct hash_desc        md5_desc;
        union tcp_md5sum_block  md5_blk;
};

#define TCP_MD5SIG_MAXKEYS      (~(u32)0)       /* really?! */

/* - functions */
extern int tcp_v4_md5_hash_skb(char *md5_hash,
                               struct tcp_md5sig_key *key,
                               struct sock *sk,
                               struct request_sock *req,
                               struct sk_buff *skb);

extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
                                                struct sock *addr_sk);

extern int tcp_v4_md5_do_add(struct sock *sk,
                             __be32 addr,
                             u8 *newkey,
                             u8 newkeylen);

extern int tcp_v4_md5_do_del(struct sock *sk,
                             __be32 addr);

#ifdef CONFIG_TCP_MD5SIG
#define tcp_twsk_md5_key(twsk)  ((twsk)->tw_md5_keylen ?                 \
                                 &(struct tcp_md5sig_key) {              \
                                        .key = (twsk)->tw_md5_key,       \
                                        .keylen = (twsk)->tw_md5_keylen, \
                                } : NULL)
#else
#define tcp_twsk_md5_key(twsk)  NULL
#endif

extern struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void);
extern void tcp_free_md5sig_pool(void);

extern struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu);
extern void __tcp_put_md5sig_pool(void);
extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
                                 unsigned header_len);
extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
                            struct tcp_md5sig_key *key);

static inline
struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
{
        int cpu = get_cpu();
        struct tcp_md5sig_pool *ret = __tcp_get_md5sig_pool(cpu);
        if (!ret)
                put_cpu();
        return ret;
}

static inline void tcp_put_md5sig_pool(void)
{
        __tcp_put_md5sig_pool();
        put_cpu();
}

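/* Usage pattern (illustrative sketch): the pool is per-cpu, so the
 * get/put pair also brackets a preemption-disabled region:
 *
 *      struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
 *      if (hp) {
 *              tcp_md5_hash_header(hp, th);
 *              tcp_md5_hash_key(hp, key);
 *              tcp_put_md5sig_pool();
 *      }
 *
 * Skipping tcp_put_md5sig_pool() would leave preemption disabled,
 * which is why the failure path of tcp_get_md5sig_pool() above
 * already calls put_cpu() before returning NULL.
 */
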
David S. Millerfe067e82007-03-07 12:12:44 -08001186/* write queue abstraction */
1187static inline void tcp_write_queue_purge(struct sock *sk)
1188{
1189 struct sk_buff *skb;
1190
1191 while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001192 sk_wmem_free_skb(sk, skb);
1193 sk_mem_reclaim(sk);
David S. Millerfe067e82007-03-07 12:12:44 -08001194}
1195
1196static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
1197{
David S. Millercd07a8e2008-09-23 00:50:13 -07001198 return skb_peek(&sk->sk_write_queue);
David S. Millerfe067e82007-03-07 12:12:44 -08001199}
1200
1201static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
1202{
David S. Millercd07a8e2008-09-23 00:50:13 -07001203 return skb_peek_tail(&sk->sk_write_queue);
David S. Millerfe067e82007-03-07 12:12:44 -08001204}
1205
1206static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
1207{
David S. Millercd07a8e2008-09-23 00:50:13 -07001208 return skb_queue_next(&sk->sk_write_queue, skb);
David S. Millerfe067e82007-03-07 12:12:44 -08001209}
1210
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08001211static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_buff *skb)
1212{
1213 return skb_queue_prev(&sk->sk_write_queue, skb);
1214}
1215
David S. Millerfe067e82007-03-07 12:12:44 -08001216#define tcp_for_write_queue(skb, sk) \
David S. Millercd07a8e2008-09-23 00:50:13 -07001217 skb_queue_walk(&(sk)->sk_write_queue, skb)
David S. Millerfe067e82007-03-07 12:12:44 -08001218
1219#define tcp_for_write_queue_from(skb, sk) \
David S. Millercd07a8e2008-09-23 00:50:13 -07001220 skb_queue_walk_from(&(sk)->sk_write_queue, skb)
David S. Millerfe067e82007-03-07 12:12:44 -08001221
Ilpo Järvinen234b6862007-12-02 00:48:02 +02001222#define tcp_for_write_queue_from_safe(skb, tmp, sk) \
David S. Millercd07a8e2008-09-23 00:50:13 -07001223 skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
Ilpo Järvinen234b6862007-12-02 00:48:02 +02001224
David S. Millerfe067e82007-03-07 12:12:44 -08001225static inline struct sk_buff *tcp_send_head(struct sock *sk)
1226{
1227 return sk->sk_send_head;
1228}
1229
David S. Millercd07a8e2008-09-23 00:50:13 -07001230static inline bool tcp_skb_is_last(const struct sock *sk,
1231 const struct sk_buff *skb)
1232{
1233 return skb_queue_is_last(&sk->sk_write_queue, skb);
1234}
1235
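/* Illustrative sketch (not part of this header): queue walkers normally
 * stop at tcp_send_head(), because everything from the send head onwards
 * is queued but not yet transmitted.  The helper name is hypothetical.
 */
static inline unsigned int tcp_example_count_sent(struct sock *sk)
{
	struct sk_buff *skb;
	unsigned int sent = 0;

	tcp_for_write_queue(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;		/* first unsent skb */
		sent++;
	}
	return sent;
}
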
static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
{
	if (tcp_skb_is_last(sk, skb))
		sk->sk_send_head = NULL;
	else
		sk->sk_send_head = tcp_write_queue_next(sk, skb);
}

static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
	if (sk->sk_send_head == skb_unlinked)
		sk->sk_send_head = NULL;
}

static inline void tcp_init_send_head(struct sock *sk)
{
	sk->sk_send_head = NULL;
}

static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__tcp_add_write_queue_tail(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL) {
		sk->sk_send_head = skb;

		if (tcp_sk(sk)->highest_sack == NULL)
			tcp_sk(sk)->highest_sack = skb;
	}
}

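/* Illustrative sketch (not part of this header): how new data ends up on
 * the write queue, roughly as the sendmsg path does it with
 * sk_stream_alloc_skb().  The function name is hypothetical and error
 * handling is reduced to the minimum.
 */
static inline int tcp_example_enqueue_data(struct sock *sk, int size)
{
	struct sk_buff *skb;

	skb = sk_stream_alloc_skb(sk, size, sk->sk_allocation);
	if (!skb)
		return -ENOBUFS;

	/* Primes sk_send_head (and highest_sack) if the queue had been
	 * fully sent out before this skb arrived.
	 */
	tcp_add_write_queue_tail(sk, skb);
	return 0;
}
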
static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_head(&sk->sk_write_queue, skb);
}

/* Insert buff after skb on the write queue of sk. */
static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
						struct sk_buff *buff,
						struct sock *sk)
{
	__skb_queue_after(&sk->sk_write_queue, skb, buff);
}

/* Insert new before skb on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);

	if (sk->sk_send_head == skb)
		sk->sk_send_head = new;
}

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	__skb_unlink(skb, &sk->sk_write_queue);
}

static inline int tcp_write_queue_empty(struct sock *sk)
{
	return skb_queue_empty(&sk->sk_write_queue);
}

/* Start sequence of the highest skb with the SACKed bit set; valid only
 * if sacked_out > 0 or when the caller has otherwise ensured validity.
 */
static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
	if (!tp->sacked_out)
		return tp->snd_una;

	if (tp->highest_sack == NULL)
		return tp->snd_nxt;

	return TCP_SKB_CB(tp->highest_sack)->seq;
}

/* Advance highest_sack to the skb following 'skb'; NULL once 'skb' is
 * the last one on the queue.
 */
static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
	tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
						tcp_write_queue_next(sk, skb);
}

static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
{
	return tcp_sk(sk)->highest_sack;
}

static inline void tcp_highest_sack_reset(struct sock *sk)
{
	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
}

/* Called when old skb is about to be deleted (to be combined with new skb) */
static inline void tcp_highest_sack_combine(struct sock *sk,
					    struct sk_buff *old,
					    struct sk_buff *new)
{
	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
		tcp_sk(sk)->highest_sack = new;
}

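/* Illustrative sketch (not part of this header): when two queued skbs are
 * merged, e.g. while collapsing retransmits, the tracker must be moved off
 * the skb that is about to be freed.  The helper name is hypothetical;
 * cf. the retransmit-collapsing code in net/ipv4/tcp_output.c.
 */
static inline void tcp_example_merge_skbs(struct sock *sk,
					  struct sk_buff *skb,
					  struct sk_buff *next_skb)
{
	/* ...assume next_skb's payload was already copied into skb... */
	tcp_highest_sack_combine(sk, next_skb, skb);
	tcp_unlink_write_queue(next_skb, sk);
	sk_wmem_free_skb(sk, next_skb);
}
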
/* /proc */
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_OPENREQ,
	TCP_SEQ_STATE_ESTABLISHED,
	TCP_SEQ_STATE_TIME_WAIT,
};

struct tcp_seq_afinfo {
	char			*name;
	sa_family_t		family;
	struct file_operations	seq_fops;
	struct seq_operations	seq_ops;
};

struct tcp_iter_state {
	struct seq_net_private	p;
	sa_family_t		family;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, sbucket, num, uid;
};

extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);

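/* Illustrative sketch (not part of this header): an address family
 * publishes its /proc/net view by filling in a tcp_seq_afinfo and
 * registering it; compare the IPv4 instance in net/ipv4/tcp_ipv4.c.
 * The ->show callback below is a hypothetical stub (and THIS_MODULE
 * assumes linux/module.h); start/next/stop and the remaining
 * file_operations are filled in by tcp_proc_register() itself.
 */
static int tcp_example_seq_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%p\n", v);	/* format one table entry */
	return 0;
}

static struct tcp_seq_afinfo tcp_example_seq_afinfo = {
	.name		= "tcp_example",
	.family		= AF_INET,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp_example_seq_show,
	},
};

/* ...and in the init path:
 *	tcp_proc_register(&init_net, &tcp_example_seq_afinfo);
 */
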
extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

extern void tcp_v4_destroy_sock(struct sock *sk);

extern int tcp_v4_gso_send_check(struct sk_buff *skb);
extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
					struct sk_buff *skb);
extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb);
extern int tcp_gro_complete(struct sk_buff *skb);
extern int tcp4_gro_complete(struct sk_buff *skb);

#ifdef CONFIG_PROC_FS
extern int tcp4_proc_init(void);
extern void tcp4_proc_exit(void);
#endif

/* TCP af-specific functions */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct sock *addr_sk);
	int			(*calc_md5_hash) (char *location,
						  struct tcp_md5sig_key *md5,
						  struct sock *sk,
						  struct request_sock *req,
						  struct sk_buff *skb);
	int			(*md5_add) (struct sock *sk,
					    struct sock *addr_sk,
					    u8 *newkey,
					    u8 len);
	int			(*md5_parse) (struct sock *sk,
					      char __user *optval,
					      int optlen);
#endif
};

struct tcp_request_sock_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct request_sock *req);
#endif
};
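
/* Illustrative sketch (not part of this header): each address family
 * provides one instance of these ops; the IPv4 side wires it up roughly
 * like this (callback names as in net/ipv4/tcp_ipv4.c), then points
 * tp->af_specific at it when the socket is initialized:
 *
 *	static struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
 *		.md5_lookup	= tcp_v4_md5_lookup,
 *		.calc_md5_hash	= tcp_v4_md5_hash_skb,
 *		.md5_add	= tcp_v4_md5_add_func,
 *		.md5_parse	= tcp_v4_parse_md5_keys,
 *	};
 */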

extern void tcp_v4_init(void);
extern void tcp_init(void);

#endif	/* _TCP_H */