/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_minisocks.c,v 1.15 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>

#ifdef CONFIG_SYSCTL
#define SYNC_INIT 0 /* let the user enable it */
#else
#define SYNC_INIT 1
#endif
/* New-style handling of TIME_WAIT sockets. */

static void inet_twdr_hangman(unsigned long data);
static void inet_twdr_twkill_work(void *data);
static void inet_twdr_twcal_tick(unsigned long data);

int sysctl_tcp_syncookies = SYNC_INIT;
int sysctl_tcp_abort_on_overflow;

struct inet_timewait_death_row tcp_death_row = {
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.period		= TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
	.death_lock	= SPIN_LOCK_UNLOCKED,
	.hashinfo	= &tcp_hashinfo,
	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
					    (unsigned long)&tcp_death_row),
	.twkill_work	= __WORK_INITIALIZER(tcp_death_row.twkill_work,
					     inet_twdr_twkill_work,
					     &tcp_death_row),
	/* Short-time timewait calendar */
	.twcal_hand	= -1,
	.twcal_timer	= TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
					    (unsigned long)&tcp_death_row),
};

EXPORT_SYMBOL_GPL(tcp_death_row);

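/*
 * tcp_death_row drives two timers: tw_timer sweeps one of the
 * INET_TWDR_TWKILL_SLOTS cells of the slow wheel per .period, while
 * twcal_timer runs the finer-grained "recycle" calendar (twcal_row)
 * used for sockets whose timeout was shortened below TCP_TIMEWAIT_LEN.
 * inet_twsk_schedule() below decides which wheel a socket lands on.
 */
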
static void inet_twsk_schedule(struct inet_timewait_sock *tw,
			       struct inet_timewait_death_row *twdr,
			       const int timeo);

static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return 1;
	if (after(end_seq, s_win) && before(seq, e_win))
		return 1;
	return (seq == e_win && seq == end_seq);
}
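
/*
 * Example: with a receive window [s_win, e_win) = [100, 200), a segment
 * spanning [90, 110) is acceptable (after(110, 100) && before(90, 200)),
 * and a zero-length segment with seq == end_seq == 100 is acceptable
 * through the seq == s_win test even when the advertised window is zero.
 */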

/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully when one of the ends sits in LAST-ACK or CLOSING,
 *   retransmitting its FIN (and, probably, a tail of data) and one or
 *   more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal
 *   packet lifetime in the internet, which leads to the wrong conclusion
 *   that it is set to catch "old duplicate segments" wandering out of
 *   their path.  That is not quite correct.  The timeout is calculated
 *   so that it exceeds the maximal retransmission timeout by enough to
 *   allow one (or more) segments sent by the peer, and our ACKs, to be
 *   lost.  This time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   has finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT with
 *   these semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT
 * ARRIVES from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless.  Strictly speaking, that means we must
 * spinlock it.  I do not want to!  Well, the probability of
 * misbehaviour is ridiculously low, and it seems we could use some
 * mb() tricks to avoid misreading sequence numbers, states etc.  --ANK
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	struct tcp_options_received tmp_opt;
	int paws_reject = 0;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tmp_opt, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_check(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return TCP_TW_ACK;

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			goto kill_with_rst;

		/* Dup ACK? */
		if (!after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			return TCP_TW_RST;
		}

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = xtime.tv_sec;
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		/* I am shamed, but I failed to make it more elegant.
		 * Yes, it is a direct reference to IP, which is impossible
		 * to generalize to IPv6. Taking into account that IPv6
		 * does not understand recycling in any case, it is not
		 * a big problem in practice. --ANK */
		if (tw->tw_family == AF_INET &&
		    tcp_death_row.sysctl_tw_recycle && tcptw->tw_ts_recent_stamp &&
		    tcp_v4_tw_remember_stamp(tw))
			inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout);
		else
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 *      connection to be larger than the largest sequence
	 *      number it used on the previous connection incarnation,
	 *      and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 *      to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* An in-window segment: it may only be a reset or a bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule(tw, &tcp_death_row);
				inet_twsk_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = xtime.tv_sec;
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.
	 *
	 * All such segments are ACKed immediately.
	 *
	 * The only exception is a new SYN. We accept it, if it is
	 * not an old duplicate and we are not in danger of being killed
	 * by delayed old duplicates.  The RFC check -- that the SYN carries
	 * a newer sequence number -- works at rates < 40Mbit/sec.
	 * However, if PAWS works, it is reliable, and we may even
	 * relax the silly sequence-space cutoff.
	 *
	 * RED-PEN: we violate the main RFC requirement: if this SYN turns
	 * out to be an old duplicate (i.e. we receive an RST in reply to
	 * the SYN-ACK), we must return the socket to the time-wait state.
	 * That is not good, but not fatal yet.
	 */

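	/* The ISN below is placed a full maximal (unscaled) window of 65535,
	 * plus a little, above tw_snd_nxt, so that it is larger than any
	 * sequence number the previous incarnation can have used -- rule (1)
	 * of the RFC 1122 text quoted above.
	 */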
	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->when = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with random sequence number < rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN);

		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	struct inet_timewait_sock *tw = NULL;
	const struct tcp_sock *tp = tcp_sk(sk);
	int recycle_ok = 0;

	if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
		recycle_ok = tp->af_specific->remember_stamp(sk);

	if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
		tw = inet_twsk_alloc(sk, state);

	if (tw != NULL) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const struct inet_connection_sock *icsk = inet_csk(sk);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
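		/* i.e. rto = 3.5 * icsk_rto, computed with shifts:
		 * (x << 2) - (x >> 1) == 4x - x/2.
		 */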

		tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt = tp->rcv_nxt;
		tcptw->tw_snd_nxt = tp->snd_nxt;
		tcptw->tw_rcv_wnd = tcp_receive_window(tp);
		tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);
			struct tcp6_timewait_sock *tcp6tw = tcp6_twsk((struct sock *)tw);

			ipv6_addr_copy(&tcp6tw->tw_v6_daddr, &np->daddr);
			ipv6_addr_copy(&tcp6tw->tw_v6_rcv_saddr, &np->rcv_saddr);
			tw->tw_ipv6only = np->ipv6only;
		}
#endif
		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		inet_twsk_schedule(tw, &tcp_death_row, timeo);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		if (net_ratelimit())
			printk(KERN_INFO "TCP: time wait bucket table overflow\n");
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}

/* Returns non-zero if quota exceeded. */
static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
				    const int slot)
{
	struct inet_timewait_sock *tw;
	struct hlist_node *node;
	unsigned int killed;
	int ret;

	/* NOTE: compare this to the previous version where the lock
	 * was released after detaching the chain. It was racy,
	 * because tw buckets are scheduled in a non-serialized context
	 * in 2.3 (with netfilter), and with softnet it is common, because
	 * soft irqs are not sequenced.
	 */
	killed = 0;
	ret = 0;
rescan:
	inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
		__inet_twsk_del_dead_node(tw);
		spin_unlock(&twdr->death_lock);
		__inet_twsk_kill(tw, twdr->hashinfo);
		inet_twsk_put(tw);
		killed++;
		spin_lock(&twdr->death_lock);
		if (killed > INET_TWDR_TWKILL_QUOTA) {
			ret = 1;
			break;
		}

		/* While we dropped twdr->death_lock, another cpu may have
		 * killed off the next TW bucket in the list, therefore
		 * do a fresh re-read of the hlist head node with the
		 * lock reacquired.  We still use the hlist traversal
		 * macro in order to get the prefetches.
		 */
		goto rescan;
	}

	twdr->tw_count -= killed;
	NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed);

	return ret;
}

static void inet_twdr_hangman(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	unsigned int need_timer;

	twdr = (struct inet_timewait_death_row *)data;
	spin_lock(&twdr->death_lock);

	if (twdr->tw_count == 0)
		goto out;

	need_timer = 0;
	if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
		twdr->thread_slots |= (1 << twdr->slot);
		mb();
		schedule_work(&twdr->twkill_work);
		need_timer = 1;
	} else {
		/* We purged the entire slot, anything left? */
		if (twdr->tw_count)
			need_timer = 1;
	}
	twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
	if (need_timer)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
	spin_unlock(&twdr->death_lock);
}

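/* Deliberately has no definition anywhere: if the compile-time check in
 * inet_twdr_twkill_work() below cannot be proven false and optimized
 * away, the call survives and the kernel fails to link, flagging a bad
 * INET_TWDR_TWKILL_SLOTS configuration.
 */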
extern void twkill_slots_invalid(void);

static void inet_twdr_twkill_work(void *data)
{
	struct inet_timewait_death_row *twdr = data;
	int i;

	if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8))
		twkill_slots_invalid();

	while (twdr->thread_slots) {
		spin_lock_bh(&twdr->death_lock);
		for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
			if (!(twdr->thread_slots & (1 << i)))
				continue;

			while (inet_twdr_do_twkill_work(twdr, i) != 0) {
				if (need_resched()) {
					spin_unlock_bh(&twdr->death_lock);
					schedule();
					spin_lock_bh(&twdr->death_lock);
				}
			}

			twdr->thread_slots &= ~(1 << i);
		}
		spin_unlock_bh(&twdr->death_lock);
	}
}

/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void inet_twsk_deschedule(struct inet_timewait_sock *tw,
			  struct inet_timewait_death_row *twdr)
{
	spin_lock(&twdr->death_lock);
	if (inet_twsk_del_dead_node(tw)) {
		inet_twsk_put(tw);
		if (--twdr->tw_count == 0)
			del_timer(&twdr->tw_timer);
	}
	spin_unlock(&twdr->death_lock);
	__inet_twsk_kill(tw, twdr->hashinfo);
}

static void inet_twsk_schedule(struct inet_timewait_sock *tw,
			       struct inet_timewait_death_row *twdr,
			       const int timeo)
{
	struct hlist_head *list;
	int slot;

	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent retransmitted
	 * FINs (or previous segments) are lost (the probability of such an
	 * event is p^(N+1), where p is the probability of losing a single
	 * packet, and the time to detect the loss is about RTO*(2^N - 1)
	 * with exponential backoff).  The normal timewait length is
	 * calculated so that we wait at least for one retransmitted FIN
	 * (the maximal RTO is 120sec).
	 * [ BTW, Linux, following BSD, violates this requirement by waiting
	 *   only 60sec; we should wait at least 240 secs.
	 *   Well, 240 consumes too many resources 8)
	 * ]
	 * This interval is left unreduced so that we catch old duplicates
	 * and responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect old duplicates, we can reduce
	 * the interval to the bounds required by the RTO, rather than the
	 * MSL.  So, if the peer understands PAWS, we kill the tw bucket
	 * after 3.5*RTO (it is important that this number is greater than
	 * the TS tick!) and detect old duplicates with the help of PAWS.
	 */
	slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;
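	/* i.e. slot = timeo / 2^INET_TWDR_RECYCLE_TICK, rounded up:
	 * adding (divisor - 1) before the right shift rounds any
	 * remainder up to the next calendar tick.
	 */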

	spin_lock(&twdr->death_lock);

	/* Unlink it, if it was scheduled */
	if (inet_twsk_del_dead_node(tw))
		twdr->tw_count--;
	else
		atomic_inc(&tw->tw_refcnt);
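	/* The branch above either reuses the reference that the old death
	 * list held on the socket, or takes a new one for the list we are
	 * about to add it to.
	 */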

	if (slot >= INET_TWDR_RECYCLE_SLOTS) {
		/* Schedule to slow timer */
		if (timeo >= TCP_TIMEWAIT_LEN) {
			slot = INET_TWDR_TWKILL_SLOTS - 1;
		} else {
			slot = (timeo + twdr->period - 1) / twdr->period;
			if (slot >= INET_TWDR_TWKILL_SLOTS)
				slot = INET_TWDR_TWKILL_SLOTS - 1;
		}
		tw->tw_ttd = jiffies + timeo;
		slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
		list = &twdr->cells[slot];
	} else {
		tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);

		if (twdr->twcal_hand < 0) {
			twdr->twcal_hand = 0;
			twdr->twcal_jiffie = jiffies;
			twdr->twcal_timer.expires = twdr->twcal_jiffie +
					(slot << INET_TWDR_RECYCLE_TICK);
			add_timer(&twdr->twcal_timer);
		} else {
			if (time_after(twdr->twcal_timer.expires,
				       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
				mod_timer(&twdr->twcal_timer,
					  jiffies + (slot << INET_TWDR_RECYCLE_TICK));
			slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
		}
		list = &twdr->twcal_row[slot];
	}

	hlist_add_head(&tw->tw_death_node, list);

	if (twdr->tw_count++ == 0)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
	spin_unlock(&twdr->death_lock);
}
544
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -0700545void inet_twdr_twcal_tick(unsigned long data)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700546{
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -0700547 struct inet_timewait_death_row *twdr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700548 int n, slot;
549 unsigned long j;
550 unsigned long now = jiffies;
551 int killed = 0;
552 int adv = 0;
553
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -0700554 twdr = (struct inet_timewait_death_row *)data;
555
556 spin_lock(&twdr->death_lock);
557 if (twdr->twcal_hand < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700558 goto out;
559
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -0700560 slot = twdr->twcal_hand;
561 j = twdr->twcal_jiffie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700562
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -0700563 for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700564 if (time_before_eq(j, now)) {
565 struct hlist_node *node, *safe;
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -0700566 struct inet_timewait_sock *tw;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700567
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -0700568 inet_twsk_for_each_inmate_safe(tw, node, safe,
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -0700569 &twdr->twcal_row[slot]) {
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -0700570 __inet_twsk_del_dead_node(tw);
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -0700571 __inet_twsk_kill(tw, twdr->hashinfo);
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -0700572 inet_twsk_put(tw);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700573 killed++;
574 }
575 } else {
576 if (!adv) {
577 adv = 1;
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -0700578 twdr->twcal_jiffie = j;
579 twdr->twcal_hand = slot;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700580 }
581
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -0700582 if (!hlist_empty(&twdr->twcal_row[slot])) {
583 mod_timer(&twdr->twcal_timer, j);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700584 goto out;
585 }
586 }
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -0700587 j += 1 << INET_TWDR_RECYCLE_TICK;
588 slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700589 }
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -0700590 twdr->twcal_hand = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700591
592out:
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -0700593 if ((twdr->tw_count -= killed) == 0)
594 del_timer(&twdr->tw_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700595 NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed);
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -0700596 spin_unlock(&twdr->death_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700597}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here: the tp of the
 * listening socket already contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);

	if (newsk != NULL) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp;

		/* Now setup tcp_sock */
		newtp = tcp_sk(newsk);
		newtp->pred_flags = 0;
		newtp->rcv_nxt = treq->rcv_isn + 1;
		newtp->snd_nxt = newtp->snd_una = newtp->snd_sml = treq->snt_isn + 1;

		tcp_prequeue_init(newtp);

		tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn);

		newtp->srtt = 0;
		newtp->mdev = TCP_TIMEOUT_INIT;
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->left_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = 0x7fffffff;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = 2;
		newtp->snd_cwnd_cnt = 0;

		newtp->frto_counter = 0;
		newtp->frto_highmark = 0;

		newtp->ca_ops = &tcp_reno;

		tcp_set_ca_state(newtp, TCP_CA_Open);
		tcp_init_xmit_timers(newsk);
		skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->rcv_wup = treq->rcv_isn + 1;
		newtp->write_seq = treq->snt_isn + 1;
		newtp->pushed_seq = newtp->write_seq;
		newtp->copied_seq = treq->rcv_isn + 1;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.eff_sacks = 0;

		newtp->probes_out = 0;
		newtp->rx_opt.num_sacks = 0;
		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				newtp->rx_opt.sack_ok |= 2;
		}
		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->rx_opt.snd_wscale;
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = xtime.tv_sec;
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		if (skb->len >= TCP_MIN_RCVMSS + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		TCP_ECN_openreq_child(newtp, req);
		if (newtp->ecn_flags & TCP_ECN_OK)
			sock_set_flag(newsk, SOCK_NO_LARGESEND);

		TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}

/*
 * Process an incoming packet for SYN_RECV sockets represented
 * as a request_sock.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   struct request_sock **prev)
{
	struct tcphdr *th = skb->h.th;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	int paws_reject = 0;
	struct tcp_options_received tmp_opt;
	struct sock *child;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr) >> 2)) {
		tcp_parse_options(skb, &tmp_opt, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			/* We do not store the true stamp, but it is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = xtime.tv_sec - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
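			/* i.e. roughly 2^retrans initial timeouts ago,
			 * approximating the elapsed exponential backoff of
			 * the SYN-ACK retransmissions.
			 */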
			paws_reject = tcp_paws_check(&tmp_opt, th->rst);
		}
	}

	/* Check for a pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 * describe the SYN-RECV state.  All the description
		 * is wrong, we cannot believe it and should
		 * rely only on common sense and implementation
		 * experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 */
		req->rsk_ops->rtx_syn_ack(sk, req, NULL);
		return NULL;
	}

	/* Further reproduces the section "SEGMENT ARRIVES"
	   for the SYN-RECEIVED state of RFC793.
	   It is broken; however, it fails only when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party.  We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid.  Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   The malicious sender sends identical SYNs (and thus identical
	   sequence numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, the ACK test passes.  So
	   does the sequence test, the SYN is truncated, and thus we
	   consider it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept is set, we silently
	   drop this bare ACK.  Otherwise, we create an established
	   connection.  Both ends (listening sockets) accept the new
	   incoming connection and try to talk to each other. 8-)

	   Note: This case is both harmless and rare.  The possibility is
	   about the same as us discovering intelligent life on another
	   planet tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK
	   from a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol.  All the checks must be made
	   before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by the listening socket.
	 */
	if ((flg & TCP_FLAG_ACK) &&
	    (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too early or too late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->rsk_ops->send_ack(skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN))
		goto embryonic_reset;

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
	if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

	/* OK, the ACK is valid, create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it is dropped after the
	 * socket is created, expect trouble.
	 */
	child = tp->af_specific->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL)
		goto listen_overflow;

	inet_csk_reqsk_queue_unlink(sk, req, prev);
	inet_csk_reqsk_queue_removed(sk, req);

	inet_csk_reqsk_queue_add(sk, req, child);
	return child;

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
	if (!(flg & TCP_FLAG_RST))
		req->rsk_ops->send_reset(skb);

	inet_csk_reqsk_queue_drop(sk, req, prev);
	return NULL;
}

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);

		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent, 0);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket no longer protects us.
		 */
		sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}

EXPORT_SYMBOL(tcp_check_req);
EXPORT_SYMBOL(tcp_child_process);
EXPORT_SYMBOL(tcp_create_openreq_child);
EXPORT_SYMBOL(tcp_timewait_state_process);
EXPORT_SYMBOL(inet_twsk_deschedule);