/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/dmaengine.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/kref.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);

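/*
 * Worst-case TCP header space to reserve when allocating an skb: the TCP
 * header plus options, rounded up, on top of the largest link and network
 * headers.  MAX_TCP_OPTION_SPACE follows from the 4-bit data offset field
 * (60 bytes maximum header minus the 20-byte fixed part).
 */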
#define MAX_TCP_HEADER (128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40

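/*
 * Never offer a window over 32767 without using window scaling: some
 * broken stacks treat the 16-bit window field as signed.
 */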
#define MAX_TCP_WINDOW 32767U

#define TCP_DEFAULT_INIT_RCVWND 10

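/* Minimal accepted MSS; it is (60 + 60 + 8) - (20 + 20) = 88. */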
#define TCP_MIN_MSS 88U

#define TCP_BASE_MSS 512

#define TCP_FASTRETRANS_THRESH 3

#define TCP_MAX_REORDERING 127

#define TCP_MAX_QUICKACKS 16U

#define TCP_URG_VALID 0x0100
#define TCP_URG_NOTYET 0x0200
#define TCP_URG_READ 0x0400

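/*
 * TCP_RETR1: how many retransmissions before the network layer is asked
 * to re-validate the route (RFC 1122 suggests at least 3).
 * TCP_RETR2: how many before giving up entirely; RFC 1122 wants the limit
 * to correspond to at least 100 seconds, and 15 is roughly 13-30 minutes
 * depending on RTO.
 */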
#define TCP_RETR1 3

#define TCP_RETR2 15

#define TCP_SYN_RETRIES 5

#define TCP_SYNACK_RETRIES 5

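/*
 * How long to wait before destroying TIME-WAIT state: about 60 seconds,
 * not the RFC-mandated 2*MSL of 240 seconds.  TCP_FIN_TIMEOUT reuses it
 * as the BSD-style FIN_WAIT2 deadlock breaker.
 */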
#define TCP_TIMEWAIT_LEN (60*HZ)
#define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN

#define TCP_DELACK_MAX ((unsigned)(HZ/5))
#if HZ >= 100
#define TCP_DELACK_MIN ((unsigned)(HZ/25))
#define TCP_ATO_MIN ((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN 4U
#define TCP_ATO_MIN 4U
#endif
#define TCP_RTO_MAX ((unsigned)(120*HZ))
#define TCP_RTO_MIN ((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U))

#define TCP_KEEPALIVE_TIME (120*60*HZ)
#define TCP_KEEPALIVE_PROBES 9
#define TCP_KEEPALIVE_INTVL (75*HZ)

#define MAX_TCP_KEEPIDLE 32767
#define MAX_TCP_KEEPINTVL 32767
#define MAX_TCP_KEEPCNT 127
#define MAX_TCP_SYNCNT 127

#define TCP_SYNQ_INTERVAL (HZ/5)

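/*
 * PAWS (RFC 1323) constants.  TCP_PAWS_24DAYS is how long, in seconds, a
 * remembered peer timestamp stays usable (roughly the wrap time of a
 * millisecond-resolution 32-bit timestamp clock); TCP_PAWS_MSL is how many
 * seconds of idle invalidate a per-host stamp; TCP_PAWS_WINDOW is the
 * replay window, in timestamp ticks.
 */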
#define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)
#define TCP_PAWS_MSL 60
#define TCP_PAWS_WINDOW 1

#define TCPOPT_NOP 1
#define TCPOPT_EOL 0
#define TCPOPT_MSS 2
#define TCPOPT_WINDOW 3
#define TCPOPT_SACK_PERM 4
#define TCPOPT_SACK 5
#define TCPOPT_TIMESTAMP 8
#define TCPOPT_MD5SIG 19
#define TCPOPT_COOKIE 253

#define TCPOLEN_MSS 4
#define TCPOLEN_WINDOW 3
#define TCPOLEN_SACK_PERM 2
#define TCPOLEN_TIMESTAMP 10
#define TCPOLEN_MD5SIG 18
#define TCPOLEN_COOKIE_BASE 2
#define TCPOLEN_COOKIE_PAIR 3
#define TCPOLEN_COOKIE_MIN (TCPOLEN_COOKIE_BASE+TCP_COOKIE_MIN)
#define TCPOLEN_COOKIE_MAX (TCPOLEN_COOKIE_BASE+TCP_COOKIE_MAX)

#define TCPOLEN_TSTAMP_ALIGNED 12
#define TCPOLEN_WSCALE_ALIGNED 4
#define TCPOLEN_SACKPERM_ALIGNED 4
#define TCPOLEN_SACK_BASE 2
#define TCPOLEN_SACK_BASE_ALIGNED 4
#define TCPOLEN_SACK_PERBLOCK 8
#define TCPOLEN_MD5SIG_ALIGNED 20
#define TCPOLEN_MSS_ALIGNED 4

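/*
 * tp->nonagle flag bits: OFF is set by TCP_NODELAY, CORK by TCP_CORK, and
 * PUSH is a one-shot override for data already queued while corked.
 */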
#define TCP_NAGLE_OFF 1
#define TCP_NAGLE_CORK 2
#define TCP_NAGLE_PUSH 4

#define TCP_THIN_LINEAR_RETRIES 6

#define TCP_INIT_CWND 10

extern struct inet_timewait_death_row tcp_death_row;

extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_frto_response;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_abc;
extern int sysctl_tcp_mtu_probing;
extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_max_ssthresh;
extern int sysctl_tcp_cookie_size;
extern int sysctl_tcp_thin_linear_timeouts;
extern int sysctl_tcp_thin_dupack;

#ifdef CONFIG_HTC_TCP_SYN_FAIL
extern __be32 sysctl_tcp_syn_fail;
#endif

extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;

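/*
 * Sequence number comparisons that stay correct across 32-bit wraparound:
 * seq1 is "before" seq2 iff (s32)(seq1 - seq2) < 0.
 */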
static inline int before(__u32 seq1, __u32 seq2)
{
        return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1) before(seq1, seq2)

static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
        return seq3 - seq2 >= seq1 - seq2;
}

static inline bool tcp_out_of_memory(struct sock *sk)
{
        if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
            sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
                return true;
        return false;
}

static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
        struct percpu_counter *ocp = sk->sk_prot->orphan_count;
        int orphans = percpu_counter_read_positive(ocp);

        if (orphans << shift > sysctl_tcp_max_orphans) {
                orphans = percpu_counter_sum_positive(ocp);
                if (orphans << shift > sysctl_tcp_max_orphans)
                        return true;
        }
        return false;
}

extern bool tcp_check_oom(struct sock *sk, int shift);

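/*
 * syncookies: remember the time of the last SYN-queue overflow; "no recent
 * overflow" means none within the last TCP_TIMEOUT_FALLBACK jiffies.
 */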
static inline void tcp_synq_overflow(struct sock *sk)
{
        tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
}

static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
{
        unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
        return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
}

extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
#define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

extern void tcp_init_mem(struct net *net);

extern void tcp_v4_err(struct sk_buff *skb, u32);

extern void tcp_shutdown(struct sock *sk, int how);

extern int tcp_v4_rcv(struct sk_buff *skb);

extern struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it);
extern void *tcp_v4_tw_get_peer(struct sock *sk);
extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                       size_t size);
extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
                        size_t size, int flags);
extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                                 const struct tcphdr *th, unsigned int len);
extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                               const struct tcphdr *th, unsigned int len);
extern void tcp_rcv_space_adjust(struct sock *sk);
extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
extern void tcp_twsk_destructor(struct sock *sk);
extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
                               struct pipe_inode_info *pipe, size_t len,
                               unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk,
                                         const unsigned int pkts)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_ack.quick) {
                if (pkts >= icsk->icsk_ack.quick) {
                        icsk->icsk_ack.quick = 0;
                        icsk->icsk_ack.ato = TCP_ATO_MIN;
                } else
                        icsk->icsk_ack.quick -= pkts;
        }
}

#define TCP_ECN_OK 1
#define TCP_ECN_QUEUE_CWR 2
#define TCP_ECN_DEMAND_CWR 4
#define TCP_ECN_SEEN 8

static __inline__ void
TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
{
        if (sysctl_tcp_ecn && th->ece && th->cwr)
                inet_rsk(req)->ecn_ok = 1;
}

enum tcp_tw_status {
        TCP_TW_SUCCESS = 0,
        TCP_TW_RST = 1,
        TCP_TW_ACK = 2,
        TCP_TW_SYN = 3
};

extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
                                                     struct sk_buff *skb,
                                                     const struct tcphdr *th);
extern struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req,
                                  struct request_sock **prev);
extern int tcp_child_process(struct sock *parent, struct sock *child,
                             struct sk_buff *skb);
extern int tcp_use_frto(struct sock *sk);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
extern void tcp_clear_retrans(struct tcp_sock *tp);
extern void tcp_update_metrics(struct sock *sk);
extern void tcp_close(struct sock *sk, long timeout);
extern unsigned int tcp_poll(struct file *file, struct socket *sock,
                             struct poll_table_struct *wait);
extern int tcp_getsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, int __user *optlen);
extern int tcp_setsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, unsigned int optlen);
extern int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
                                 char __user *optval, int __user *optlen);
extern int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
                                 char __user *optval, unsigned int optlen);
extern void tcp_set_keepalive(struct sock *sk, int val);
extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                       size_t len, int nonblock, int flags, int *addr_len);
extern void tcp_parse_options(const struct sk_buff *skb,
                              struct tcp_options_received *opt_rx,
                              const u8 **hvpp, int estab);
extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);

extern void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
extern struct sock *tcp_create_openreq_child(struct sock *sk,
                                             struct request_sock *req,
                                             struct sk_buff *skb);
extern struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                                         struct request_sock *req,
                                         struct dst_entry *dst);
extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
                          int addr_len);
extern int tcp_connect(struct sock *sk);
extern struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
                                       struct request_sock *req,
                                       struct request_values *rvp);
extern int tcp_disconnect(struct sock *sk, int flags);

extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
                                    struct ip_options *opt);
#ifdef CONFIG_SYN_COOKIES
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
                                     __u16 *mss);
#else
static inline __u32 cookie_v4_init_sequence(struct sock *sk,
                                            struct sk_buff *skb,
                                            __u16 *mss)
{
        return 0;
}
#endif

extern __u32 cookie_init_timestamp(struct request_sock *req);
extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);

extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
extern __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
                                     __u16 *mss);
#else
static inline __u32 cookie_v6_init_sequence(struct sock *sk,
                                            struct sk_buff *skb,
                                            __u16 *mss)
{
        return 0;
}
#endif

extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
                                      int nonagle);
extern int tcp_may_send_now(struct sock *sk);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_retransmit_timer(struct sock *sk);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
extern int tcp_syn_flood_action(struct sock *sk,
                                const struct sk_buff *skb,
                                const char *proto);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);

extern void tcp_cwnd_application_limited(struct sock *sk);

extern void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
        inet_csk_clear_xmit_timers(sk);
}

extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk);

static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
        int cutoff;

        if (tp->max_window >= 512)
                cutoff = (tp->max_window >> 1);
        else
                cutoff = tp->max_window;

        if (cutoff && pktsize > cutoff)
                return max_t(int, cutoff, 68U - tp->tcp_header_len);
        else
                return pktsize;
}

extern void tcp_get_info(const struct sock *, struct tcp_info *);

typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
                               unsigned int, size_t);
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
                         sk_read_actor_t recv_actor);

extern void tcp_initialize_rcv_mss(struct sock *sk);

extern int tcp_mtu_to_mss(const struct sock *sk, int pmtu);
extern int tcp_mss_to_mtu(const struct sock *sk, int mss);
extern void tcp_mtup_init(struct sock *sk);
extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt);

static inline void tcp_bound_rto(const struct sock *sk)
{
        if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
                inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
        return (tp->srtt >> 3) + tp->rttvar;
}

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
        tp->pred_flags = htonl((tp->tcp_header_len << 26) |
                               ntohl(TCP_FLAG_ACK) |
                               snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
        __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (skb_queue_empty(&tp->out_of_order_queue) &&
            tp->rcv_wnd &&
            atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
            !tp->urg_data)
                tcp_fast_path_on(tp);
}

static inline u32 tcp_rto_min(struct sock *sk)
{
        const struct dst_entry *dst = __sk_dst_get(sk);
        u32 rto_min = TCP_RTO_MIN;

        if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
                rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
        return rto_min;
}

static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
        s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

        if (win < 0)
                win = 0;
        return (u32) win;
}

extern u32 __tcp_select_window(struct sock *sk);

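/* TCP timestamp clock: jiffies, truncated to 32 bits. */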
#define tcp_time_stamp ((__u32)(jiffies))

#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

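/*
 * Per-packet control information passed from the send queuing engine to
 * the transmission code; sequence numbers are kept in host byte order.
 */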
struct tcp_skb_cb {
        union {
                struct inet_skb_parm h4;
#if IS_ENABLED(CONFIG_IPV6)
                struct inet6_skb_parm h6;
#endif
        } header;
        __u32 seq;
        __u32 end_seq;
        __u32 when;
        __u8 tcp_flags;
        __u8 sacked;
#define TCPCB_SACKED_ACKED 0x01
#define TCPCB_SACKED_RETRANS 0x02
#define TCPCB_LOST 0x04
#define TCPCB_TAGBITS 0x07
        __u8 ip_dsfield;

#define TCPCB_EVER_RETRANS 0x80
#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

        __u32 ack_seq;
};

#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))

static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
        return skb_shinfo(skb)->gso_segs;
}

static inline int tcp_skb_mss(const struct sk_buff *skb)
{
        return skb_shinfo(skb)->gso_size;
}

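/* Events passed to the congestion control interface's cwnd_event() hook. */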
enum tcp_ca_event {
        CA_EVENT_TX_START,
        CA_EVENT_CWND_RESTART,
        CA_EVENT_COMPLETE_CWR,
        CA_EVENT_FRTO,
        CA_EVENT_LOSS,
        CA_EVENT_FAST_ACK,
        CA_EVENT_SLOW_ACK,
};

#define TCP_CA_NAME_MAX 16
#define TCP_CA_MAX 128
#define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CONG_NON_RESTRICTED 0x1
#define TCP_CONG_RTT_STAMP 0x2

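/*
 * Interface for pluggable congestion control algorithms.  ssthresh() and
 * cong_avoid() must be provided; the remaining hooks may be left NULL.
 */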
struct tcp_congestion_ops {
        struct list_head list;
        unsigned long flags;

        void (*init)(struct sock *sk);
        void (*release)(struct sock *sk);

        u32 (*ssthresh)(struct sock *sk);
        u32 (*min_cwnd)(const struct sock *sk);
        void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
        void (*set_state)(struct sock *sk, u8 new_state);
        void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
        u32 (*undo_cwnd)(struct sock *sk);
        void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
        void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);

        char name[TCP_CA_NAME_MAX];
        struct module *owner;
};

extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

extern void tcp_init_congestion_control(struct sock *sk);
extern void tcp_cleanup_congestion_control(struct sock *sk);
extern int tcp_set_default_congestion_control(const char *name);
extern void tcp_get_default_congestion_control(char *name);
extern void tcp_get_available_congestion_control(char *buf, size_t len);
extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
extern int tcp_set_allowed_congestion_control(char *allowed);
extern int tcp_set_congestion_control(struct sock *sk, const char *name);
extern void tcp_slow_start(struct tcp_sock *tp);
extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);

extern struct tcp_congestion_ops tcp_init_congestion_ops;
extern u32 tcp_reno_ssthresh(struct sock *sk);
extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
extern u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_ca_ops->set_state)
                icsk->icsk_ca_ops->set_state(sk, ca_state);
        icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_ca_ops->cwnd_event)
                icsk->icsk_ca_ops->cwnd_event(sk, event);
}

static inline int tcp_is_sack(const struct tcp_sock *tp)
{
        return tp->rx_opt.sack_ok;
}

static inline int tcp_is_reno(const struct tcp_sock *tp)
{
        return !tcp_is_sack(tp);
}

static inline int tcp_is_fack(const struct tcp_sock *tp)
{
        return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
        tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
}

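/*
 * Packet accounting used by the loss recovery code:
 *	left_out  = sacked_out + lost_out  (segments that have left the network)
 *	in_flight = packets_out - left_out + retrans_out
 */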
static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
        return tp->sacked_out + tp->lost_out;
}

static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
        return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}

#define TCP_INFINITE_SSTHRESH 0x7fffffff

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
        return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);

        if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
                return tp->snd_ssthresh;
        else
                return max(tp->snd_ssthresh,
                           ((tp->snd_cwnd >> 1) +
                            (tp->snd_cwnd >> 2)));
}

#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)

extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);

static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
        return 3;
}

static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
        return tp->reordering;
}

static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
        return tp->snd_una + tp->snd_wnd;
}

extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);

static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
                                       const struct sk_buff *skb)
{
        if (skb->len < mss)
                tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);

        if (!tp->packets_out && !icsk->icsk_pending)
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
                                          icsk->icsk_rto, TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
        tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
        tp->snd_wl1 = seq;
}

static inline __sum16 tcp_v4_check(int len, __be32 saddr,
                                   __be32 daddr, __wsum base)
{
        return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
        return __skb_checksum_complete(skb);
}

static inline int tcp_checksum_complete(struct sk_buff *skb)
{
        return !skb_csum_unnecessary(skb) &&
                __tcp_checksum_complete(skb);
}

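/*
 * Prequeue: queue segments for processing in process context when a task
 * is blocked in recvmsg(), so checksumming and the copy to user space
 * happen there rather than in softirq context.
 */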
static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
        tp->ucopy.task = NULL;
        tp->ucopy.len = 0;
        tp->ucopy.memory = 0;
        skb_queue_head_init(&tp->ucopy.prequeue);
#ifdef CONFIG_NET_DMA
        tp->ucopy.dma_chan = NULL;
        tp->ucopy.wakeup = 0;
        tp->ucopy.pinned_list = NULL;
        tp->ucopy.dma_cookie = 0;
#endif
}

static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (sysctl_tcp_low_latency || !tp->ucopy.task)
                return 0;

        __skb_queue_tail(&tp->ucopy.prequeue, skb);
        tp->ucopy.memory += skb->truesize;
        if (tp->ucopy.memory > sk->sk_rcvbuf) {
                struct sk_buff *skb1;

                BUG_ON(sock_owned_by_user(sk));

                while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
                        sk_backlog_rcv(sk, skb1);
                        NET_INC_STATS_BH(sock_net(sk),
                                         LINUX_MIB_TCPPREQUEUEDROPPED);
                }

                tp->ucopy.memory = 0;
        } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
                wake_up_interruptible_sync_poll(sk_sleep(sk),
                                                POLLIN | POLLRDNORM | POLLRDBAND);
                if (!inet_csk_ack_scheduled(sk))
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  (3 * tcp_rto_min(sk)) / 4,
                                                  TCP_RTO_MAX);
        }
        return 1;
}

#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[] = {
        "Unused", "Established", "Syn Sent", "Syn Recv",
        "Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
        "Close Wait", "Last ACK", "Listen", "Closing"
};
#endif

extern void tcp_set_state(struct sock *sk, int state);

extern void tcp_done(struct sock *sk);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
        rx_opt->dsack = 0;
        rx_opt->num_sacks = 0;
}

extern void tcp_select_initial_window(int __space, __u32 mss,
                                      __u32 *rcv_wnd, __u32 *window_clamp,
                                      int wscale_ok, __u8 *rcv_wscale,
                                      __u32 init_rcv_wnd);

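/*
 * How much of the receive buffer is advertised as window.  The sysctl
 * tcp_adv_win_scale reserves a fraction for overhead: a positive scale
 * advertises space - space/2^scale, a non-positive one space/2^(-scale).
 */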
static inline int tcp_win_from_space(int space)
{
        return sysctl_tcp_adv_win_scale <= 0 ?
                (space >> (-sysctl_tcp_adv_win_scale)) :
                space - (space >> sysctl_tcp_adv_win_scale);
}

static inline int tcp_space(const struct sock *sk)
{
        return tcp_win_from_space(sk->sk_rcvbuf -
                                  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
        return tcp_win_from_space(sk->sk_rcvbuf);
}

static inline void tcp_openreq_init(struct request_sock *req,
                                    struct tcp_options_received *rx_opt,
                                    struct sk_buff *skb)
{
        struct inet_request_sock *ireq = inet_rsk(req);

        req->rcv_wnd = 0;
        req->cookie_ts = 0;
        tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
        req->mss = rx_opt->mss_clamp;
        req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
        ireq->tstamp_ok = rx_opt->tstamp_ok;
        ireq->sack_ok = rx_opt->sack_ok;
        ireq->snd_wscale = rx_opt->snd_wscale;
        ireq->wscale_ok = rx_opt->wscale_ok;
        ireq->acked = 0;
        ireq->ecn_ok = 0;
        ireq->rmt_port = tcp_hdr(skb)->source;
        ireq->loc_port = tcp_hdr(skb)->dest;
}

extern void tcp_enter_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
        return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
        return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
        return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
}

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
        const struct inet_connection_sock *icsk = &tp->inet_conn;

        return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
                     tcp_time_stamp - tp->rcv_tstamp);
}

static inline int tcp_fin_time(const struct sock *sk)
{
        int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
        const int rto = inet_csk(sk)->icsk_rto;

        if (fin_timeout < (rto << 2) - (rto >> 1))
                fin_timeout = (rto << 2) - (rto >> 1);

        return fin_timeout;
}

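/*
 * PAWS (RFC 1323): a segment passes if its timestamp is no more than
 * paws_win behind ts_recent, if the remembered stamp is over 24 days old
 * (the timestamp clock may have wrapped), or if no stamp is on record.
 */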
static inline int tcp_paws_check(const struct tcp_options_received *rx_opt,
                                 int paws_win)
{
        if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
                return 1;
        if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
                return 1;
        if (!rx_opt->ts_recent)
                return 1;
        return 0;
}

static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
                                  int rst)
{
        if (tcp_paws_check(rx_opt, 0))
                return 0;

        if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
                return 0;
        return 1;
}

static inline void tcp_mib_init(struct net *net)
{
        TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
        TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
        TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
        TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
}

static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
        tp->lost_skb_hint = NULL;
        tp->scoreboard_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
        tcp_clear_retrans_hints_partial(tp);
        tp->retransmit_skb_hint = NULL;
}

struct crypto_hash;

union tcp_md5_addr {
        struct in_addr a4;
#if IS_ENABLED(CONFIG_IPV6)
        struct in6_addr a6;
#endif
};

struct tcp_md5sig_key {
        struct hlist_node node;
        u8 keylen;
        u8 family;
        union tcp_md5_addr addr;
        u8 key[TCP_MD5SIG_MAXKEYLEN];
        struct rcu_head rcu;
};

struct tcp_md5sig_info {
        struct hlist_head head;
        struct rcu_head rcu;
};

struct tcp4_pseudohdr {
        __be32 saddr;
        __be32 daddr;
        __u8 pad;
        __u8 protocol;
        __be16 len;
};

struct tcp6_pseudohdr {
        struct in6_addr saddr;
        struct in6_addr daddr;
        __be32 len;
        __be32 protocol;
};

union tcp_md5sum_block {
        struct tcp4_pseudohdr ip4;
#if IS_ENABLED(CONFIG_IPV6)
        struct tcp6_pseudohdr ip6;
#endif
};

struct tcp_md5sig_pool {
        struct hash_desc md5_desc;
        union tcp_md5sum_block md5_blk;
};

extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
                               const struct sock *sk,
                               const struct request_sock *req,
                               const struct sk_buff *skb);
extern int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
                          int family, const u8 *newkey, u8 newkeylen,
                          gfp_t gfp);
extern int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
                          int family);
extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
                                                struct sock *addr_sk);

#ifdef CONFIG_TCP_MD5SIG
extern struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
                                                const union tcp_md5_addr *addr,
                                                int family);
#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
#else
static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
                                                       const union tcp_md5_addr *addr,
                                                       int family)
{
        return NULL;
}
#define tcp_twsk_md5_key(twsk) NULL
#endif

extern struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *);
extern void tcp_free_md5sig_pool(void);

extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
extern void tcp_put_md5sig_pool(void);

extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
                                 unsigned header_len);
extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
                            const struct tcp_md5sig_key *key);

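/*
 * Write queue helpers: sk->sk_send_head points at the first segment that
 * has not yet been sent; NULL means everything queued has been pushed out.
 */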
static inline void tcp_write_queue_purge(struct sock *sk)
{
        struct sk_buff *skb;

        while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
                sk_wmem_free_skb(sk, skb);
        sk_mem_reclaim(sk);
        tcp_clear_all_retrans_hints(tcp_sk(sk));
}

static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
{
        return skb_peek(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
        return skb_peek_tail(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
                                                   const struct sk_buff *skb)
{
        return skb_queue_next(&sk->sk_write_queue, skb);
}

static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
                                                   const struct sk_buff *skb)
{
        return skb_queue_prev(&sk->sk_write_queue, skb);
}

#define tcp_for_write_queue(skb, sk) \
        skb_queue_walk(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from(skb, sk) \
        skb_queue_walk_from(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from_safe(skb, tmp, sk) \
        skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)

static inline struct sk_buff *tcp_send_head(const struct sock *sk)
{
        return sk->sk_send_head;
}

static inline bool tcp_skb_is_last(const struct sock *sk,
                                   const struct sk_buff *skb)
{
        return skb_queue_is_last(&sk->sk_write_queue, skb);
}

static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
{
        if (tcp_skb_is_last(sk, skb))
                sk->sk_send_head = NULL;
        else
                sk->sk_send_head = tcp_write_queue_next(sk, skb);
}

static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
        if (sk->sk_send_head == skb_unlinked)
                sk->sk_send_head = NULL;
}

static inline void tcp_init_send_head(struct sock *sk)
{
        sk->sk_send_head = NULL;
}

static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
        __skb_queue_tail(&sk->sk_write_queue, skb);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
        __tcp_add_write_queue_tail(sk, skb);

        if (sk->sk_send_head == NULL) {
                sk->sk_send_head = skb;

                if (tcp_sk(sk)->highest_sack == NULL)
                        tcp_sk(sk)->highest_sack = skb;
        }
}

static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
{
        __skb_queue_head(&sk->sk_write_queue, skb);
}

static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
                                                struct sk_buff *buff,
                                                struct sock *sk)
{
        __skb_queue_after(&sk->sk_write_queue, skb, buff);
}

static inline void tcp_insert_write_queue_before(struct sk_buff *new,
                                                 struct sk_buff *skb,
                                                 struct sock *sk)
{
        __skb_queue_before(&sk->sk_write_queue, skb, new);

        if (sk->sk_send_head == skb)
                sk->sk_send_head = new;
}

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
        __skb_unlink(skb, &sk->sk_write_queue);
}

static inline int tcp_write_queue_empty(struct sock *sk)
{
        return skb_queue_empty(&sk->sk_write_queue);
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
        if (tcp_send_head(sk)) {
                struct tcp_sock *tp = tcp_sk(sk);

                __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
        }
}

static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
        if (!tp->sacked_out)
                return tp->snd_una;

        if (tp->highest_sack == NULL)
                return tp->snd_nxt;

        return TCP_SKB_CB(tp->highest_sack)->seq;
}

static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
        tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
                                   tcp_write_queue_next(sk, skb);
}

static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
{
        return tcp_sk(sk)->highest_sack;
}

static inline void tcp_highest_sack_reset(struct sock *sk)
{
        tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
}

static inline void tcp_highest_sack_combine(struct sock *sk,
                                            struct sk_buff *old,
                                            struct sk_buff *new)
{
        if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
                tcp_sk(sk)->highest_sack = new;
}

static inline unsigned int tcp_stream_is_thin(struct tcp_sock *tp)
{
        return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
}

enum tcp_seq_states {
        TCP_SEQ_STATE_LISTENING,
        TCP_SEQ_STATE_OPENREQ,
        TCP_SEQ_STATE_ESTABLISHED,
        TCP_SEQ_STATE_TIME_WAIT,
};

int tcp_seq_open(struct inode *inode, struct file *file);

struct tcp_seq_afinfo {
        char *name;
        sa_family_t family;
        const struct file_operations *seq_fops;
        struct seq_operations seq_ops;
};

struct tcp_iter_state {
        struct seq_net_private p;
        sa_family_t family;
        enum tcp_seq_states state;
        struct sock *syn_wait_sk;
        int bucket, offset, sbucket, num, uid;
        loff_t last_pos;
};

extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);

extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

extern void tcp_v4_destroy_sock(struct sock *sk);

extern int tcp_v4_gso_send_check(struct sk_buff *skb);
extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
                                       netdev_features_t features);
extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
                                        struct sk_buff *skb);
extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
                                         struct sk_buff *skb);
extern int tcp_gro_complete(struct sk_buff *skb);
extern int tcp4_gro_complete(struct sk_buff *skb);

extern int tcp_nuke_addr(struct net *net, struct sockaddr *addr);

#ifdef CONFIG_PROC_FS
extern int tcp4_proc_init(void);
extern void tcp4_proc_exit(void);
#endif

struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk,
                                              struct sock *addr_sk);
        int (*calc_md5_hash) (char *location,
                              struct tcp_md5sig_key *md5,
                              const struct sock *sk,
                              const struct request_sock *req,
                              const struct sk_buff *skb);
        int (*md5_parse) (struct sock *sk,
                          char __user *optval,
                          int optlen);
#endif
};

struct tcp_request_sock_ops {
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk,
                                              struct request_sock *req);
        int (*calc_md5_hash) (char *location,
                              struct tcp_md5sig_key *md5,
                              const struct sock *sk,
                              const struct request_sock *req,
                              const struct sk_buff *skb);
#endif
};

#define COOKIE_DIGEST_WORDS (SHA_DIGEST_WORDS)
#define COOKIE_MESSAGE_WORDS (SHA_MESSAGE_BYTES / 4)
#define COOKIE_WORKSPACE_WORDS (COOKIE_DIGEST_WORDS + COOKIE_MESSAGE_WORDS)

extern int tcp_cookie_generator(u32 *bakery);

struct tcp_cookie_values {
        struct kref kref;
        u8 cookie_pair[TCP_COOKIE_PAIR_SIZE];
        u8 cookie_pair_size;
        u8 cookie_desired;
        u16 s_data_desired:11,
            s_data_constant:1,
            s_data_in:1,
            s_data_out:1,
            s_data_unused:2;
        u8 s_data_payload[0];
};

static inline void tcp_cookie_values_release(struct kref *kref)
{
        kfree(container_of(kref, struct tcp_cookie_values, kref));
}

static inline int tcp_s_data_size(const struct tcp_sock *tp)
{
        return (tp->cookie_values != NULL && tp->cookie_values->s_data_constant)
                ? tp->cookie_values->s_data_desired
                : 0;
}

struct tcp_extend_values {
        struct request_values rv;
        u32 cookie_bakery[COOKIE_WORKSPACE_WORDS];
        u8 cookie_plus:6,
           cookie_out_never:1,
           cookie_in_always:1;
};

static inline struct tcp_extend_values *tcp_xv(struct request_values *rvp)
{
        return (struct tcp_extend_values *)rvp;
}

extern void tcp_v4_init(void);
extern void tcp_init(void);

#endif