/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/dmaengine.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/kref.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Offer an initial receive window of 10 mss. */
#define TCP_DEFAULT_INIT_RCVWND	10

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* The least MTU to use for probing */
#define TCP_BASE_MSS		512

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal reordering. */
#define TCP_MAX_REORDERING	127

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 5	/* number of times to retry active opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC2988bis initial RTO value	*/
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
						 * used as a fallback RTO for the
						 * initial data transmission if no
						 * valid RTT sample has been acquired,
						 * most likely due to retrans in 3WHS.
						 */
/* Number of full MSS to receive before Acking RFC2581 */
#define TCP_DELACK_SEG	1

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */

#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
#define TCPOPT_COOKIE		253	/* Cookie extension (experimental) */

/*
 *	TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18
#define TCPOLEN_COOKIE_BASE	2	/* Cookie-less header extension */
#define TCPOLEN_COOKIE_PAIR	3	/* Cookie pair header extension */
#define TCPOLEN_COOKIE_MIN	(TCPOLEN_COOKIE_BASE+TCP_COOKIE_MIN)
#define TCPOLEN_COOKIE_MAX	(TCPOLEN_COOKIE_BASE+TCP_COOKIE_MAX)

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6	/* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per draft-hkchu-tcpm-initcwnd-01 */
#define TCP_INIT_CWND		10

extern struct inet_timewait_death_row tcp_death_row;

/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_frto_response;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_abc;
extern int sysctl_tcp_mtu_probing;
extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_max_ssthresh;
extern int sysctl_tcp_cookie_size;
extern int sysctl_tcp_thin_linear_timeouts;
extern int sysctl_tcp_thin_dupack;

extern atomic_long_t tcp_memory_allocated;

/* sysctl variables for controlling various tcp parameters */
extern int sysctl_tcp_delack_seg;
extern int sysctl_tcp_use_userconfig;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;

/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline int before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1) 	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
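/* Illustrative example of the wraparound handling above (not part of the
 * upstream header): before(0xfffffff0, 0x10) is true, because
 * (__s32)(0xfffffff0 - 0x10) == (__s32)0xffffffe0 == -32 < 0, i.e.
 * 0xfffffff0 still counts as "earlier" than 0x10 even though the 32-bit
 * sequence space has wrapped in between.
 */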

static inline bool tcp_out_of_memory(struct sock *sk)
{
	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
		return true;
	return false;
}

static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
	int orphans = percpu_counter_read_positive(ocp);

	if (orphans << shift > sysctl_tcp_max_orphans) {
		orphans = percpu_counter_sum_positive(ocp);
		if (orphans << shift > sysctl_tcp_max_orphans)
			return true;
	}
	return false;
}

extern bool tcp_check_oom(struct sock *sk, int shift);

/* syncookies: remember time of last synqueue overflow */
static inline void tcp_synq_overflow(struct sock *sk)
{
	tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
	return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
}

extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

extern void tcp_init_mem(struct net *net);

extern void tcp_v4_err(struct sk_buff *skb, u32);

extern void tcp_shutdown (struct sock *sk, int how);

extern int tcp_v4_rcv(struct sk_buff *skb);

extern struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it);
extern void *tcp_v4_tw_get_peer(struct sock *sk);
extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t size);
extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
			size_t size, int flags);
extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
				 const struct tcphdr *th, unsigned int len);
extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
			       const struct tcphdr *th, unsigned int len);
extern void tcp_rcv_space_adjust(struct sock *sk);
extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
extern void tcp_twsk_destructor(struct sock *sk);
extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags);
/* sysctl master controller */
extern int tcp_use_userconfig_sysctl_handler(struct ctl_table *, int,
				void __user *, size_t *, loff_t *);
extern int tcp_proc_delayed_ack_control(struct ctl_table *, int,
				void __user *, size_t *, loff_t *);

static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define	TCP_ECN_OK		1
#define	TCP_ECN_QUEUE_CWR	2
#define	TCP_ECN_DEMAND_CWR	4
#define	TCP_ECN_SEEN		8

static __inline__ void
TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
{
	if (sysctl_tcp_ecn && th->ece && th->cwr)
		inet_rsk(req)->ecn_ok = 1;
}

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};


extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
						     struct sk_buff *skb,
						     const struct tcphdr *th);
extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
				   struct request_sock *req,
				   struct request_sock **prev);
extern int tcp_child_process(struct sock *parent, struct sock *child,
			     struct sk_buff *skb);
extern int tcp_use_frto(struct sock *sk);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
extern void tcp_clear_retrans(struct tcp_sock *tp);
extern void tcp_update_metrics(struct sock *sk);
extern void tcp_close(struct sock *sk, long timeout);
extern unsigned int tcp_poll(struct file * file, struct socket *sock,
			     struct poll_table_struct *wait);
extern int tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen);
extern int tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen);
extern int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, int __user *optlen);
extern int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, unsigned int optlen);
extern void tcp_set_keepalive(struct sock *sk, int val);
extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t len, int nonblock, int flags, int *addr_len);
extern void tcp_parse_options(const struct sk_buff *skb,
			      struct tcp_options_received *opt_rx, const u8 **hvpp,
			      int estab);
extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);

/*
 *	TCP v4 functions exported for the inet6 API
 */

extern void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
extern struct sock * tcp_create_openreq_child(struct sock *sk,
					      struct request_sock *req,
					      struct sk_buff *skb);
extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst);
extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len);
extern int tcp_connect(struct sock *sk);
extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
					struct request_sock *req,
					struct request_values *rvp);
extern int tcp_disconnect(struct sock *sk, int flags);


/* From syncookies.c */
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
#ifdef CONFIG_SYN_COOKIES
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);
#else
static inline __u32 cookie_v4_init_sequence(struct sock *sk,
					    struct sk_buff *skb,
					    __u16 *mss)
{
	return 0;
}
#endif

extern __u32 cookie_init_timestamp(struct request_sock *req);
extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);

/* From net/ipv6/syncookies.c */
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
extern __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
				     __u16 *mss);
#else
static inline __u32 cookie_v6_init_sequence(struct sock *sk,
					    struct sk_buff *skb,
					    __u16 *mss)
{
	return 0;
}
#endif
/* tcp_output.c */

extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
				      int nonagle);
extern int tcp_may_send_now(struct sock *sk);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_retransmit_timer(struct sock *sk);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
extern int tcp_syn_flood_action(struct sock *sk,
				const struct sk_buff *skb,
				const char *proto);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);

/* tcp_input.c */
extern void tcp_cwnd_application_limited(struct sock *sk);

/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}

extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window >= 512)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
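/* Worked example (illustrative, not upstream): with tp->max_window == 1000
 * (>= 512) the cutoff above is 500, so a 1460 byte pktsize is bounded to
 * max_t(int, 500, 68U - tp->tcp_header_len) == 500, while any pktsize
 * already at or below the cutoff is returned unchanged.
 */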
Linus Torvalds1da177e2005-04-16 15:20:36 -0700542
Arnaldo Carvalho de Melo17b085e2005-08-12 12:59:17 -0300543/* tcp.c */
Eric Dumazetcf533ea2011-10-21 05:22:42 -0400544extern void tcp_get_info(const struct sock *, struct tcp_info *);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700545
546/* Read 'sendfile()'-style from a TCP socket */
547typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
548 unsigned int, size_t);
549extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
550 sk_read_actor_t recv_actor);
551
Stephen Hemminger40efc6f2006-01-03 16:03:49 -0800552extern void tcp_initialize_rcv_mss(struct sock *sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700553
Eric Dumazetcf533ea2011-10-21 05:22:42 -0400554extern int tcp_mtu_to_mss(const struct sock *sk, int pmtu);
555extern int tcp_mss_to_mtu(const struct sock *sk, int mss);
John Heffner5d424d52006-03-20 17:53:41 -0800556extern void tcp_mtup_init(struct sock *sk);
Jerry Chu9ad7c042011-06-08 11:08:38 +0000557extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt);
John Heffner5d424d52006-03-20 17:53:41 -0800558
Damian Lukowskif1ecd5d2009-08-26 00:16:31 +0000559static inline void tcp_bound_rto(const struct sock *sk)
560{
561 if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
562 inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
563}
564
565static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
566{
567 return (tp->srtt >> 3) + tp->rttvar;
568}
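/* Note (illustrative): tp->srtt is kept scaled by 8, hence the >> 3 above,
 * so the result is the smoothed RTT estimate plus the variance term in
 * jiffies.  E.g. srtt == 800 (a 100 jiffy RTT) and rttvar == 50 yield an
 * RTO of 150 jiffies; callers are expected to clamp the value with
 * tcp_bound_rto() above so icsk_rto never exceeds TCP_RTO_MAX.
 */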

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (skb_queue_empty(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer push more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}
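/* Example (illustrative): with rcv_wup == 1000, rcv_wnd == 500 and
 * rcv_nxt == 1200 the remaining advertised window is 300 bytes; if the
 * peer has pushed past the offered window (rcv_nxt beyond
 * rcv_wup + rcv_wnd), the negative result is clamped to 0.
 */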

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
extern u32 __tcp_select_window(struct sock *sk);

/* TCP timestamps are only 32-bits, this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below.  We decided
 * to use only the low 32-bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))

#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here too.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames		*/
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	__u32		when;		/* used to compute rtt's	*/
	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/
	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/
	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
			/* 1 byte hole */
#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

	__u32		ack_seq;	/* Sequence number ACK'd	*/
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_segs;
}

/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}
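/* E.g. (illustrative) a queued skb carrying 7300 bytes of payload with
 * gso_size 1460 counts as tcp_skb_pcount() == 5 sub-packets of
 * tcp_skb_mss() == 1460 bytes each for the packet-accounting done by
 * congestion control and SACK processing.
 */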

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_FRTO,		/* fast recovery timeout */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_FAST_ACK,	/* in sequence ack */
	CA_EVENT_SLOW_ACK,	/* other ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CONG_NON_RESTRICTED 0x1
#define TCP_CONG_RTT_STAMP	0x2

struct tcp_congestion_ops {
	struct list_head	list;
	unsigned long flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data  (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* lower bound for congestion window (optional) */
	u32 (*min_cwnd)(const struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* new value of cwnd after loss (optional) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
	/* get info for inet_diag (optional) */
	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
};

extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

extern void tcp_init_congestion_control(struct sock *sk);
extern void tcp_cleanup_congestion_control(struct sock *sk);
extern int tcp_set_default_congestion_control(const char *name);
extern void tcp_get_default_congestion_control(char *name);
extern void tcp_get_available_congestion_control(char *buf, size_t len);
extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
extern int tcp_set_allowed_congestion_control(char *allowed);
extern int tcp_set_congestion_control(struct sock *sk, const char *name);
extern void tcp_slow_start(struct tcp_sock *tp);
extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);

extern struct tcp_congestion_ops tcp_init_congestion_ops;
extern u32 tcp_reno_ssthresh(struct sock *sk);
extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
extern u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 * tcp_is_fack - FACK enabled, implies SACK enabled
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok;
}

static inline int tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline int tcp_is_fack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
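/* Worked example: packets_out == 10, sacked_out == 2, lost_out == 1 and
 * retrans_out == 1 give 10 - (2 + 1) + 1 == 8 packets still considered
 * to be in the network.
 */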

#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is rate halving phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);

/* The maximum number of MSS of available cwnd for which TSO defers
 * sending if not using sysctl_tcp_tso_win_divisor.
 */
static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
	return 3;
}

/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto". This will be the default - same as
 * the default reordering threshold - but if reordering increases,
 * we must be able to allow cwnd to burst at least this much in order
 * to not pull it back when holes are filled.
 */
static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
	return tp->reordering;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}
extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);

static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
				       const struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (!tp->packets_out && !icsk->icsk_pending)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  icsk->icsk_rto, TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
}

static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}

static inline int tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__tcp_checksum_complete(skb);
}

/* Prequeue for VJ style copy to user, combined with checksumming. */

static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	tp->ucopy.wakeup = 0;
	tp->ucopy.pinned_list = NULL;
	tp->ucopy.dma_cookie = 0;
#endif
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8) --ANK
 *
 * NOTE: is this not too big to inline?
 */
static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return 0;

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return 1;
}


#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[]={
	"Unused","Established","Syn Sent","Syn Recv",
	"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
	"Close Wait","Last ACK","Listen","Closing"
};
#endif
extern void tcp_set_state(struct sock *sk, int state);

extern void tcp_done(struct sock *sk);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
				      __u32 *rcv_wnd, __u32 *window_clamp,
				      int wscale_ok, __u8 *rcv_wscale,
				      __u32 init_rcv_wnd);

static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale<=0 ?
		(space>>(-sysctl_tcp_adv_win_scale)) :
		space - (space>>sysctl_tcp_adv_win_scale);
}
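/* Example (illustrative): with sysctl_tcp_adv_win_scale == 2, 64KB of
 * socket space is split as 64KB - 64KB/4 == 48KB of advertised window,
 * the remaining quarter being kept back as application/overhead buffer;
 * a negative setting instead advertises space >> -scale.
 */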

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}

static inline void tcp_openreq_init(struct request_sock *req,
				    struct tcp_options_received *rx_opt,
				    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->cookie_ts = 0;
	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	ireq->tstamp_ok = rx_opt->tstamp_ok;
	ireq->sack_ok = rx_opt->sack_ok;
	ireq->snd_wscale = rx_opt->snd_wscale;
	ireq->wscale_ok = rx_opt->wscale_ok;
	ireq->acked = 0;
	ireq->ecn_ok = 0;
	ireq->rmt_port = tcp_hdr(skb)->source;
	ireq->loc_port = tcp_hdr(skb)->dest;
}

extern void tcp_enter_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
}

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = &tp->inet_conn;

	return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
			  tcp_time_stamp - tp->rcv_tstamp);
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}
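/* (rto << 2) - (rto >> 1) is 3.5 * RTO, so the FIN_WAIT2 timeout computed
 * above is never shorter than three and a half retransmission timeouts,
 * whatever linger2 or sysctl_tcp_fin_timeout request.
 */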

static inline int tcp_paws_check(const struct tcp_options_received *rx_opt,
                                 int paws_win)
{
        if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
                return 1;
        if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
                return 1;
        /*
         * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0;
         * only subsequent tcp messages carry valid values.  Ignore a 0
         * value, or else a 'negative' tsval might forbid us from
         * accepting their packets.
         */
        if (!rx_opt->ts_recent)
                return 1;
        return 0;
}

static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
                                  int rst)
{
        if (tcp_paws_check(rx_opt, 0))
                return 0;

        /* RST segments are not recommended to carry a timestamp,
           and, if they do, it is recommended to ignore PAWS because
           "their cleanup function should take precedence over timestamps."
           That is certainly a mistake, but it is necessary to understand
           the reason for the constraint before relaxing it: if the peer
           reboots, its clock may go out-of-sync and half-open connections
           would then never be reset.
           The problem would not exist at all if every implementation
           followed the draft about maintaining the clock across reboots.
           Linux-2.2 DOES NOT!

           However, we can relax the time bounds for RST segments to MSL.
         */
        if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
                return 0;
        return 1;
}
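
/* Input-path callers treat a non-zero return as a PAWS failure and drop
 * the segment, roughly (a sketch of the validate-incoming logic):
 *
 *      if (tcp_paws_reject(&tp->rx_opt, th->rst))
 *              ... discard skb (and possibly send a duplicate ACK) ...
 *
 * The (s32) cast in tcp_paws_check() keeps the comparison correct across
 * timestamp wraparound: with ts_recent == 0xffffff00 and a freshly
 * wrapped rcv_tsval == 0x10, the subtraction yields a small negative
 * s32, so the segment is still accepted.
 */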

static inline void tcp_mib_init(struct net *net)
{
        /* See RFC 2012 */
        TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
        TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
        TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
        TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
        tp->lost_skb_hint = NULL;
        tp->scoreboard_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
        tcp_clear_retrans_hints_partial(tp);
        tp->retransmit_skb_hint = NULL;
}

/* MD5 Signature */
struct crypto_hash;

union tcp_md5_addr {
        struct in_addr  a4;
#if IS_ENABLED(CONFIG_IPV6)
        struct in6_addr a6;
#endif
};

/* - key database */
struct tcp_md5sig_key {
        struct hlist_node  node;
        u8                 keylen;
        u8                 family; /* AF_INET or AF_INET6 */
        union tcp_md5_addr addr;
        u8                 key[TCP_MD5SIG_MAXKEYLEN];
        struct rcu_head    rcu;
};

/* - sock block */
struct tcp_md5sig_info {
        struct hlist_head head;
        struct rcu_head   rcu;
};

/* - pseudo header */
struct tcp4_pseudohdr {
        __be32 saddr;
        __be32 daddr;
        __u8   pad;
        __u8   protocol;
        __be16 len;
};

struct tcp6_pseudohdr {
        struct in6_addr saddr;
        struct in6_addr daddr;
        __be32          len;
        __be32          protocol; /* including padding */
};

union tcp_md5sum_block {
        struct tcp4_pseudohdr ip4;
#if IS_ENABLED(CONFIG_IPV6)
        struct tcp6_pseudohdr ip6;
#endif
};

/* - pool: digest algorithm, hash description and scratch buffer */
struct tcp_md5sig_pool {
        struct hash_desc       md5_desc;
        union tcp_md5sum_block md5_blk;
};

/* - functions */
extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
                               const struct sock *sk,
                               const struct request_sock *req,
                               const struct sk_buff *skb);
extern int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
                          int family, const u8 *newkey,
                          u8 newkeylen, gfp_t gfp);
extern int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
                          int family);
extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
                                                struct sock *addr_sk);

#ifdef CONFIG_TCP_MD5SIG
extern struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
                        const union tcp_md5_addr *addr, int family);
#define tcp_twsk_md5_key(twsk)  ((twsk)->tw_md5_key)
#else
static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
                                         const union tcp_md5_addr *addr,
                                         int family)
{
        return NULL;
}
#define tcp_twsk_md5_key(twsk)  NULL
#endif

extern struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *);
extern void tcp_free_md5sig_pool(void);

extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
extern void tcp_put_md5sig_pool(void);

extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
                                 unsigned int header_len);
extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
                            const struct tcp_md5sig_key *key);
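
/* Hashing code brackets its use of the per-cpu pool with get/put and
 * feeds the pieces to the helpers above, roughly like this (a sketch of
 * the tcp_ipv4.c hashing routines; error handling elided):
 *
 *      struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
 *
 *      if (!hp)
 *              return 1;
 *      if (crypto_hash_init(&hp->md5_desc) ||
 *          tcp_md5_hash_header(hp, th) ||
 *          tcp_md5_hash_skb_data(hp, skb, th->doff << 2) ||
 *          tcp_md5_hash_key(hp, key) ||
 *          crypto_hash_final(&hp->md5_desc, md5_hash))
 *              err = 1;
 *      tcp_put_md5sig_pool();
 */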

/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
{
        struct sk_buff *skb;

        while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
                sk_wmem_free_skb(sk, skb);
        sk_mem_reclaim(sk);
        tcp_clear_all_retrans_hints(tcp_sk(sk));
}

static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
{
        return skb_peek(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
        return skb_peek_tail(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
                                                   const struct sk_buff *skb)
{
        return skb_queue_next(&sk->sk_write_queue, skb);
}

static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
                                                   const struct sk_buff *skb)
{
        return skb_queue_prev(&sk->sk_write_queue, skb);
}

#define tcp_for_write_queue(skb, sk)                                    \
        skb_queue_walk(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from(skb, sk)                               \
        skb_queue_walk_from(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from_safe(skb, tmp, sk)                     \
        skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
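
/* Walks are usually bounded by the send head, because skbs from the send
 * head onwards have not been transmitted yet.  A common pattern (sketch):
 *
 *      tcp_for_write_queue(skb, sk) {
 *              if (skb == tcp_send_head(sk))
 *                      break;
 *              ... process an already-transmitted skb ...
 *      }
 */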

static inline struct sk_buff *tcp_send_head(const struct sock *sk)
{
        return sk->sk_send_head;
}

static inline bool tcp_skb_is_last(const struct sock *sk,
                                   const struct sk_buff *skb)
{
        return skb_queue_is_last(&sk->sk_write_queue, skb);
}

static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
{
        if (tcp_skb_is_last(sk, skb))
                sk->sk_send_head = NULL;
        else
                sk->sk_send_head = tcp_write_queue_next(sk, skb);
}

static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
        if (sk->sk_send_head == skb_unlinked)
                sk->sk_send_head = NULL;
}

static inline void tcp_init_send_head(struct sock *sk)
{
        sk->sk_send_head = NULL;
}

static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
        __skb_queue_tail(&sk->sk_write_queue, skb);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
        __tcp_add_write_queue_tail(sk, skb);

        /* Queue it, remembering where we must start sending. */
        if (sk->sk_send_head == NULL) {
                sk->sk_send_head = skb;

                if (tcp_sk(sk)->highest_sack == NULL)
                        tcp_sk(sk)->highest_sack = skb;
        }
}

static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
{
        __skb_queue_head(&sk->sk_write_queue, skb);
}

/* Insert buff after skb on the write queue of sk. */
static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
                                                struct sk_buff *buff,
                                                struct sock *sk)
{
        __skb_queue_after(&sk->sk_write_queue, skb, buff);
}

/* Insert new before skb on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
                                                 struct sk_buff *skb,
                                                 struct sock *sk)
{
        __skb_queue_before(&sk->sk_write_queue, skb, new);

        if (sk->sk_send_head == skb)
                sk->sk_send_head = new;
}

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
        __skb_unlink(skb, &sk->sk_write_queue);
}

static inline int tcp_write_queue_empty(struct sock *sk)
{
        return skb_queue_empty(&sk->sk_write_queue);
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
        if (tcp_send_head(sk)) {
                struct tcp_sock *tp = tcp_sk(sk);

                __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
        }
}
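
/* The usual send path appends new data and then lets the output engine
 * transmit as much as the congestion and send windows allow (sketch):
 *
 *      tcp_add_write_queue_tail(sk, skb);      (sets the send head if
 *                                               the queue was empty)
 *      tcp_push_pending_frames(sk);
 */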

/* Start sequence of the skb just after the highest skb with SACKed
 * bit, valid only if sacked_out > 0 or when the caller has itself
 * ensured validity.
 */
static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
        if (!tp->sacked_out)
                return tp->snd_una;

        if (tp->highest_sack == NULL)
                return tp->snd_nxt;

        return TCP_SKB_CB(tp->highest_sack)->seq;
}

static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
        tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
                                                tcp_write_queue_next(sk, skb);
}

static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
{
        return tcp_sk(sk)->highest_sack;
}

static inline void tcp_highest_sack_reset(struct sock *sk)
{
        tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
}

/* Called when old skb is about to be deleted (to be combined with new skb) */
static inline void tcp_highest_sack_combine(struct sock *sk,
                                            struct sk_buff *old,
                                            struct sk_buff *new)
{
        if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
                tcp_sk(sk)->highest_sack = new;
}

/* Determines whether this is a thin stream (which may suffer from
 * increased latency). Used to trigger latency-reducing mechanisms.
 */
static inline unsigned int tcp_stream_is_thin(struct tcp_sock *tp)
{
        return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
}
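
/* The thin-stream predicate gates optional latency-reducing behaviour,
 * e.g. (a sketch of the sysctl/sockopt-guarded checks in the input and
 * timer paths; the exact conditions there carry extra qualifiers):
 *
 *      if (tcp_stream_is_thin(tp) && sysctl_tcp_thin_dupack)
 *              ... retransmit after the first duplicate ACK ...
 *
 *      if (tcp_stream_is_thin(tp) && tp->thin_lto)
 *              ... use linear rather than exponential RTO backoff ...
 */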

/* /proc */
enum tcp_seq_states {
        TCP_SEQ_STATE_LISTENING,
        TCP_SEQ_STATE_OPENREQ,
        TCP_SEQ_STATE_ESTABLISHED,
        TCP_SEQ_STATE_TIME_WAIT,
};

int tcp_seq_open(struct inode *inode, struct file *file);

struct tcp_seq_afinfo {
        char                            *name;
        sa_family_t                     family;
        const struct file_operations    *seq_fops;
        struct seq_operations           seq_ops;
};

struct tcp_iter_state {
        struct seq_net_private  p;
        sa_family_t             family;
        enum tcp_seq_states     state;
        struct sock             *syn_wait_sk;
        int                     bucket, offset, sbucket, num, uid;
        loff_t                  last_pos;
};

extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
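
/* Each protocol/family instantiates a tcp_seq_afinfo and registers it
 * per network namespace, along these lines (a sketch modelled on the
 * IPv4 side; the names shown are the ones conventionally used there):
 *
 *      static struct tcp_seq_afinfo tcp4_seq_afinfo = {
 *              .name           = "tcp",
 *              .family         = AF_INET,
 *              .seq_fops       = &tcp_afinfo_seq_fops,
 *              .seq_ops        = {
 *                      .show   = tcp4_seq_show,
 *              },
 *      };
 *
 *      tcp_proc_register(net, &tcp4_seq_afinfo);
 */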

extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

extern void tcp_v4_destroy_sock(struct sock *sk);

extern int tcp_v4_gso_send_check(struct sk_buff *skb);
extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
                                       netdev_features_t features);
extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
                                        struct sk_buff *skb);
extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
                                         struct sk_buff *skb);
extern int tcp_gro_complete(struct sk_buff *skb);
extern int tcp4_gro_complete(struct sk_buff *skb);

extern int tcp_nuke_addr(struct net *net, struct sockaddr *addr);

#ifdef CONFIG_PROC_FS
extern int tcp4_proc_init(void);
extern void tcp4_proc_exit(void);
#endif

/* TCP af-specific functions */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key   *(*md5_lookup) (struct sock *sk,
                                                struct sock *addr_sk);
        int                     (*calc_md5_hash) (char *location,
                                                  struct tcp_md5sig_key *md5,
                                                  const struct sock *sk,
                                                  const struct request_sock *req,
                                                  const struct sk_buff *skb);
        int                     (*md5_parse) (struct sock *sk,
                                              char __user *optval,
                                              int optlen);
#endif
};

struct tcp_request_sock_ops {
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key   *(*md5_lookup) (struct sock *sk,
                                                struct request_sock *req);
        int                     (*calc_md5_hash) (char *location,
                                                  struct tcp_md5sig_key *md5,
                                                  const struct sock *sk,
                                                  const struct request_sock *req,
                                                  const struct sk_buff *skb);
#endif
};

/* Using SHA1 for now, define some constants. */
#define COOKIE_DIGEST_WORDS     (SHA_DIGEST_WORDS)
#define COOKIE_MESSAGE_WORDS    (SHA_MESSAGE_BYTES / 4)
#define COOKIE_WORKSPACE_WORDS  (COOKIE_DIGEST_WORDS + COOKIE_MESSAGE_WORDS)

extern int tcp_cookie_generator(u32 *bakery);

/**
 * struct tcp_cookie_values - each socket needs extra space for the
 * cookies, together with (optional) space for any SYN data.
 *
 * A tcp_sock contains a pointer to the current value, and this is
 * cloned to the tcp_timewait_sock.
 *
 * @cookie_pair:        variable data from the option exchange.
 *
 * @cookie_desired:     user specified tcpct_cookie_desired.  Zero
 *                      indicates default (sysctl_tcp_cookie_size).
 *                      After cookie sent, remembers size of cookie.
 *                      Range 0, TCP_COOKIE_MIN to TCP_COOKIE_MAX.
 *
 * @s_data_desired:     user specified tcpct_s_data_desired.  When the
 *                      constant payload is specified (@s_data_constant),
 *                      holds its length instead.
 *                      Range 0 to TCP_MSS_DESIRED.
 *
 * @s_data_payload:     constant data that is to be included in the
 *                      payload of SYN or SYNACK segments when the
 *                      cookie option is present.
 */
struct tcp_cookie_values {
        struct kref     kref;
        u8              cookie_pair[TCP_COOKIE_PAIR_SIZE];
        u8              cookie_pair_size;
        u8              cookie_desired;
        u16             s_data_desired:11,
                        s_data_constant:1,
                        s_data_in:1,
                        s_data_out:1,
                        s_data_unused:2;
        u8              s_data_payload[0];
};

static inline void tcp_cookie_values_release(struct kref *kref)
{
        kfree(container_of(kref, struct tcp_cookie_values, kref));
}
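
/* Release is by reference count; the last holder frees the structure:
 *
 *      kref_put(&tp->cookie_values->kref, tcp_cookie_values_release);
 */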

/* The length of constant payload data.  Note that s_data_desired is
 * overloaded, depending on s_data_constant: either the length of constant
 * data (returned here) or the limit on variable data.
 */
static inline int tcp_s_data_size(const struct tcp_sock *tp)
{
        return (tp->cookie_values != NULL && tp->cookie_values->s_data_constant)
                ? tp->cookie_values->s_data_desired
                : 0;
}

/**
 * struct tcp_extend_values - tcp_ipv?.c to tcp_output.c workspace.
 *
 * As tcp_request_sock has already been extended in other places, the
 * only remaining method is to pass stack values along as function
 * parameters.  These parameters are not needed after sending SYNACK.
 *
 * @cookie_bakery:      cryptographic secret and message workspace.
 *
 * @cookie_plus:        bytes in authenticator/cookie option, copied from
 *                      struct tcp_options_received (above).
 */
struct tcp_extend_values {
        struct request_values   rv;
        u32                     cookie_bakery[COOKIE_WORKSPACE_WORDS];
        u8                      cookie_plus:6,
                                cookie_out_never:1,
                                cookie_in_always:1;
};

static inline struct tcp_extend_values *tcp_xv(struct request_values *rvp)
{
        return (struct tcp_extend_values *)rvp;
}

extern void tcp_v4_init(void);
extern void tcp_init(void);

#endif  /* _TCP_H */