/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 International Business Machines Corp.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This module provides the abstraction for an SCTP transport representing
 * a remote transport address.  For local transport addresses, we just use
 * union sctp_addr.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 *
 * Any bugs reported to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/random.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* 1st Level Abstractions.  */

/* Initialize a new transport from provided memory.  */
static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
						  const union sctp_addr *addr,
						  gfp_t gfp)
{
	/* Copy in the address.  */
	peer->ipaddr = *addr;
	peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
	memset(&peer->saddr, 0, sizeof(union sctp_addr));

	/* From 6.3.1 RTO Calculation:
	 *
	 * C1) Until an RTT measurement has been made for a packet sent to the
	 * given destination transport address, set RTO to the protocol
	 * parameter 'RTO.Initial'.
	 */
	peer->rto = msecs_to_jiffies(sctp_rto_initial);

	peer->last_time_heard = jiffies;
	peer->last_time_ecne_reduced = jiffies;

	peer->param_flags = SPP_HB_DISABLE |
			    SPP_PMTUD_ENABLE |
			    SPP_SACKDELAY_ENABLE;

	/* Initialize the default path max_retrans.  */
	peer->pathmaxrxt  = sctp_max_retrans_path;

	INIT_LIST_HEAD(&peer->transmitted);
	INIT_LIST_HEAD(&peer->send_ready);
	INIT_LIST_HEAD(&peer->transports);

	setup_timer(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event,
			(unsigned long)peer);
	setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event,
			(unsigned long)peer);
	setup_timer(&peer->proto_unreach_timer,
		    sctp_generate_proto_unreach_event, (unsigned long)peer);

	/* Initialize the 64-bit random nonce sent with heartbeat. */
	get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));

	atomic_set(&peer->refcnt, 1);

	return peer;
}

/* Allocate and initialize a new transport.  */
struct sctp_transport *sctp_transport_new(const union sctp_addr *addr,
					  gfp_t gfp)
{
	struct sctp_transport *transport;

	transport = t_new(struct sctp_transport, gfp);
	if (!transport)
		goto fail;

	if (!sctp_transport_init(transport, addr, gfp))
		goto fail_init;

	transport->malloced = 1;
	SCTP_DBG_OBJCNT_INC(transport);

	return transport;

fail_init:
	kfree(transport);

fail:
	return NULL;
}

/* This transport is no longer needed.  Free it up if possible, or
 * delay until the last reference is dropped.
 */
void sctp_transport_free(struct sctp_transport *transport)
{
	transport->dead = 1;

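	/* Each pending timer pins its owner: the T3-rtx and heartbeat timers
	 * take a reference on the transport when they are armed, and the
	 * proto-unreach timer takes one on the association.  For every timer
	 * successfully deleted below, the matching reference is dropped.
	 */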
	/* Try to delete the heartbeat timer.  */
	if (del_timer(&transport->hb_timer))
		sctp_transport_put(transport);

	/* Delete the T3_rtx timer if it's active.
	 * There is no point in delaying this and letting the
	 * structure hang around in memory, since we know
	 * the transport is going away.
	 */
	if (timer_pending(&transport->T3_rtx_timer) &&
	    del_timer(&transport->T3_rtx_timer))
		sctp_transport_put(transport);

	/* Delete the ICMP proto unreachable timer if it's active. */
	if (timer_pending(&transport->proto_unreach_timer) &&
	    del_timer(&transport->proto_unreach_timer))
		sctp_association_put(transport->asoc);

	sctp_transport_put(transport);
}

/* Destroy the transport data structure.
 * Assumes there are no more users of this structure.
 */
static void sctp_transport_destroy(struct sctp_transport *transport)
{
	SCTP_ASSERT(transport->dead, "Transport is not dead", return);

	if (transport->asoc)
		sctp_association_put(transport->asoc);

	sctp_packet_free(&transport->packet);

	dst_release(transport->dst);
	kfree(transport);
	SCTP_DBG_OBJCNT_DEC(transport);
}

/* Start T3_rtx timer if it is not already running and update the heartbeat
 * timer.  This routine is called every time a DATA chunk is sent.
 */
void sctp_transport_reset_timers(struct sctp_transport *transport)
{
	/* RFC 2960 6.3.2 Retransmission Timer Rules
	 *
	 * R1) Every time a DATA chunk is sent to any address (including a
	 * retransmission), if the T3-rtx timer of that address is not running
	 * start it running so that it will expire after the RTO of that
	 * address.
	 */

	if (!timer_pending(&transport->T3_rtx_timer))
		if (!mod_timer(&transport->T3_rtx_timer,
			       jiffies + transport->rto))
			sctp_transport_hold(transport);

	/* When a data chunk is sent, reset the heartbeat interval.  */
	if (!mod_timer(&transport->hb_timer,
		       sctp_transport_timeout(transport)))
		sctp_transport_hold(transport);
}

/* This transport has been assigned to an association.
 * Initialize fields from the association or from the sock itself.
 * Register the reference count in the association.
 */
void sctp_transport_set_owner(struct sctp_transport *transport,
			      struct sctp_association *asoc)
{
	transport->asoc = asoc;
	sctp_association_hold(asoc);
}

/* Initialize the pmtu of a transport. */
void sctp_transport_pmtu(struct sctp_transport *transport)
{
	struct dst_entry *dst;

	dst = transport->af_specific->get_dst(NULL, &transport->ipaddr, NULL);

	if (dst) {
		transport->pathmtu = dst_mtu(dst);
		dst_release(dst);
	} else
		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}

/* This is a complete rip-off from __sk_dst_check();
 * the cookie is always 0 since this is how it's used in the
 * pmtu code.
 */
static struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
{
	struct dst_entry *dst = t->dst;

	if (dst && dst->obsolete && dst->ops->check(dst, 0) == NULL) {
		dst_release(t->dst);
		t->dst = NULL;
		return NULL;
	}

	return dst;
}

void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
{
	struct dst_entry *dst;

	if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
		pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n",
			__func__, pmtu,
			SCTP_DEFAULT_MINSEGMENT);
		/* Use default minimum segment size and disable
		 * pmtu discovery on this transport.
		 */
		t->pathmtu = SCTP_DEFAULT_MINSEGMENT;
	} else {
		t->pathmtu = pmtu;
	}

	dst = sctp_transport_dst_check(t);
	if (dst)
		dst->ops->update_pmtu(dst, pmtu);
}

/* Caches the dst entry and source address for a transport's destination
 * address.
 */
void sctp_transport_route(struct sctp_transport *transport,
			  union sctp_addr *saddr, struct sctp_sock *opt)
{
	struct sctp_association *asoc = transport->asoc;
	struct sctp_af *af = transport->af_specific;
	union sctp_addr *daddr = &transport->ipaddr;
	struct dst_entry *dst;

	dst = af->get_dst(asoc, daddr, saddr);

	if (saddr)
		memcpy(&transport->saddr, saddr, sizeof(union sctp_addr));
	else
		af->get_saddr(opt, asoc, dst, daddr, &transport->saddr);

	transport->dst = dst;
	if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu) {
		return;
	}
	if (dst) {
		transport->pathmtu = dst_mtu(dst);

		/* Initialize sk->sk_rcv_saddr, if the transport is the
		 * association's active path for getsockname().
		 */
		if (asoc && (!asoc->peer.primary_path ||
				(transport == asoc->peer.active_path)))
			opt->pf->af->to_sk_saddr(&transport->saddr,
						 asoc->base.sk);
	} else
		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}

/* Hold a reference to a transport.  */
void sctp_transport_hold(struct sctp_transport *transport)
{
	atomic_inc(&transport->refcnt);
}

/* Release a reference to a transport and clean up
 * if there are no more references.
 */
void sctp_transport_put(struct sctp_transport *transport)
{
	if (atomic_dec_and_test(&transport->refcnt))
		sctp_transport_destroy(transport);
}

/* Update transport's RTO based on the newly calculated RTT. */
void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
{
	/* Check for valid transport.  */
	SCTP_ASSERT(tp, "NULL transport", return);

	/* We should not be doing any RTO updates unless rto_pending is set.  */
	SCTP_ASSERT(tp->rto_pending, "rto_pending not set", return);

	if (tp->rttvar || tp->srtt) {
		/* 6.3.1 C3) When a new RTT measurement R' is made, set
		 * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
		 * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
		 */

		/* Note: the above algorithm has been rewritten to
		 * express rto_beta and rto_alpha as inverse powers
		 * of two.
		 * For example, assuming the default value of RTO.Alpha of
		 * 1/8, rto_alpha would be expressed as 3.
		 */
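		/* Illustrative arithmetic only: with the default rto_alpha
		 * of 3 (1/8) and rto_beta of 2 (1/4), srtt == 200 and a new
		 * rtt == 120 give
		 *   srtt   <- 200 - (200 >> 3) + (120 >> 3) = 190
		 *   rttvar <- rttvar - (rttvar >> 2) + (|200 - 120| >> 2).
		 */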
		tp->rttvar = tp->rttvar - (tp->rttvar >> sctp_rto_beta)
			+ ((abs(tp->srtt - rtt)) >> sctp_rto_beta);
		tp->srtt = tp->srtt - (tp->srtt >> sctp_rto_alpha)
			+ (rtt >> sctp_rto_alpha);
	} else {
		/* 6.3.1 C2) When the first RTT measurement R is made, set
		 * SRTT <- R, RTTVAR <- R/2.
		 */
		tp->srtt = rtt;
		tp->rttvar = rtt >> 1;
	}

	/* 6.3.1 G1) Whenever RTTVAR is computed, if RTTVAR = 0, then
	 * adjust RTTVAR <- G, where G is the CLOCK GRANULARITY.
	 */
	if (tp->rttvar == 0)
		tp->rttvar = SCTP_CLOCK_GRANULARITY;

	/* 6.3.1 C3) After the computation, update RTO <- SRTT + 4 * RTTVAR. */
	tp->rto = tp->srtt + (tp->rttvar << 2);

	/* 6.3.1 C6) Whenever RTO is computed, if it is less than RTO.Min
	 * seconds then it is rounded up to RTO.Min seconds.
	 */
	if (tp->rto < tp->asoc->rto_min)
		tp->rto = tp->asoc->rto_min;

	/* 6.3.1 C7) A maximum value may be placed on RTO provided it is
	 * at least RTO.max seconds.
	 */
	if (tp->rto > tp->asoc->rto_max)
		tp->rto = tp->asoc->rto_max;

	tp->rtt = rtt;

	/* Reset rto_pending so that a new RTT measurement is started when a
	 * new data chunk is sent.
	 */
	tp->rto_pending = 0;

	SCTP_DEBUG_PRINTK("%s: transport: %p, rtt: %d, srtt: %d "
			  "rttvar: %d, rto: %ld\n", __func__,
			  tp, rtt, tp->srtt, tp->rttvar, tp->rto);
}

/* This routine updates the transport's cwnd and partial_bytes_acked
 * parameters based on the bytes acked in the received SACK.
 */
void sctp_transport_raise_cwnd(struct sctp_transport *transport,
			       __u32 sack_ctsn, __u32 bytes_acked)
{
	struct sctp_association *asoc = transport->asoc;
	__u32 cwnd, ssthresh, flight_size, pba, pmtu;

	cwnd = transport->cwnd;
	flight_size = transport->flight_size;

	/* See if we need to exit Fast Recovery first */
	if (asoc->fast_recovery &&
	    TSN_lte(asoc->fast_recovery_exit, sack_ctsn))
		asoc->fast_recovery = 0;

	/* The appropriate cwnd increase algorithm is performed if, and only
	 * if the cumulative TSN would advance and the congestion window is
	 * being fully utilized.
	 */
	if (TSN_lte(sack_ctsn, transport->asoc->ctsn_ack_point) ||
	    (flight_size < cwnd))
		return;

	ssthresh = transport->ssthresh;
	pba = transport->partial_bytes_acked;
	pmtu = transport->asoc->pathmtu;

	if (cwnd <= ssthresh) {
		/* RFC 4960 7.2.1
		 * o  When cwnd is less than or equal to ssthresh, an SCTP
		 *    endpoint MUST use the slow-start algorithm to increase
		 *    cwnd only if the current congestion window is being fully
		 *    utilized, an incoming SACK advances the Cumulative TSN
		 *    Ack Point, and the data sender is not in Fast Recovery.
		 *    Only when these three conditions are met can the cwnd be
		 *    increased; otherwise, the cwnd MUST not be increased.
		 *    If these conditions are met, then cwnd MUST be increased
		 *    by, at most, the lesser of 1) the total size of the
		 *    previously outstanding DATA chunk(s) acknowledged, and
		 *    2) the destination's path MTU.  This upper bound protects
		 *    against the ACK-Splitting attack outlined in [SAVAGE99].
		 */
		if (asoc->fast_recovery)
			return;

		if (bytes_acked > pmtu)
			cwnd += pmtu;
		else
			cwnd += bytes_acked;
		SCTP_DEBUG_PRINTK("%s: SLOW START: transport: %p, "
				  "bytes_acked: %d, cwnd: %d, ssthresh: %d, "
				  "flight_size: %d, pba: %d\n",
				  __func__,
				  transport, bytes_acked, cwnd,
				  ssthresh, flight_size, pba);
	} else {
		/* RFC 2960 7.2.2 Whenever cwnd is greater than ssthresh,
		 * upon each SACK arrival that advances the Cumulative TSN Ack
		 * Point, increase partial_bytes_acked by the total number of
		 * bytes of all new chunks acknowledged in that SACK including
		 * chunks acknowledged by the new Cumulative TSN Ack and by
		 * Gap Ack Blocks.
		 *
		 * When partial_bytes_acked is equal to or greater than cwnd
		 * and before the arrival of the SACK the sender had cwnd or
		 * more bytes of data outstanding (i.e., before arrival of the
		 * SACK, flightsize was greater than or equal to cwnd),
		 * increase cwnd by MTU, and reset partial_bytes_acked to
		 * (partial_bytes_acked - cwnd).
		 */
		pba += bytes_acked;
		if (pba >= cwnd) {
			cwnd += pmtu;
			pba = ((cwnd < pba) ? (pba - cwnd) : 0);
		}
		SCTP_DEBUG_PRINTK("%s: CONGESTION AVOIDANCE: "
				  "transport: %p, bytes_acked: %d, cwnd: %d, "
				  "ssthresh: %d, flight_size: %d, pba: %d\n",
				  __func__,
				  transport, bytes_acked, cwnd,
				  ssthresh, flight_size, pba);
	}

	transport->cwnd = cwnd;
	transport->partial_bytes_acked = pba;
}

/* This routine is used to lower the transport's cwnd when congestion is
 * detected.
 */
void sctp_transport_lower_cwnd(struct sctp_transport *transport,
			       sctp_lower_cwnd_t reason)
{
	struct sctp_association *asoc = transport->asoc;

	switch (reason) {
	case SCTP_LOWER_CWND_T3_RTX:
		/* RFC 2960 Section 7.2.3, sctpimpguide
		 * When the T3-rtx timer expires on an address, SCTP should
		 * perform slow start by:
		 *      ssthresh = max(cwnd/2, 4*MTU)
		 *      cwnd = 1*MTU
		 *      partial_bytes_acked = 0
		 */
		transport->ssthresh = max(transport->cwnd/2,
					  4*asoc->pathmtu);
		transport->cwnd = asoc->pathmtu;

		/* T3-rtx also clears fast recovery */
		asoc->fast_recovery = 0;
		break;

	case SCTP_LOWER_CWND_FAST_RTX:
		/* RFC 2960 7.2.4 Adjust the ssthresh and cwnd of the
		 * destination address(es) to which the missing DATA chunks
		 * were last sent, according to the formula described in
		 * Section 7.2.3.
		 *
		 * RFC 2960 7.2.3, sctpimpguide Upon detection of packet
		 * losses from SACK (see Section 7.2.4), an endpoint
		 * should do the following:
		 *      ssthresh = max(cwnd/2, 4*MTU)
		 *      cwnd = ssthresh
		 *      partial_bytes_acked = 0
		 */
		if (asoc->fast_recovery)
			return;

		/* Mark Fast recovery */
		asoc->fast_recovery = 1;
		asoc->fast_recovery_exit = asoc->next_tsn - 1;

		transport->ssthresh = max(transport->cwnd/2,
					  4*asoc->pathmtu);
		transport->cwnd = transport->ssthresh;
		break;

	case SCTP_LOWER_CWND_ECNE:
		/* RFC 2481 Section 6.1.2.
		 * If the sender receives an ECN-Echo ACK packet
		 * then the sender knows that congestion was encountered in the
		 * network on the path from the sender to the receiver. The
		 * indication of congestion should be treated just as a
		 * congestion loss in non-ECN Capable TCP. That is, the TCP
		 * source halves the congestion window "cwnd" and reduces the
		 * slow start threshold "ssthresh".
		 * A critical condition is that TCP does not react to
		 * congestion indications more than once every window of
		 * data (or more loosely more than once every round-trip time).
		 */
		if (time_after(jiffies, transport->last_time_ecne_reduced +
					transport->rtt)) {
			transport->ssthresh = max(transport->cwnd/2,
						  4*asoc->pathmtu);
			transport->cwnd = transport->ssthresh;
			transport->last_time_ecne_reduced = jiffies;
		}
		break;

	case SCTP_LOWER_CWND_INACTIVE:
		/* RFC 2960 Section 7.2.1, sctpimpguide
		 * When the endpoint does not transmit data on a given
		 * transport address, the cwnd of the transport address
		 * should be adjusted to max(cwnd/2, 4*MTU) per RTO.
		 * NOTE: Although the draft recommends that this check be
		 * done every RTO interval, we do it every heartbeat
		 * interval.
		 */
		transport->cwnd = max(transport->cwnd/2,
					 4*asoc->pathmtu);
		break;
	}

	transport->partial_bytes_acked = 0;
	SCTP_DEBUG_PRINTK("%s: transport: %p reason: %d cwnd: "
			  "%d ssthresh: %d\n", __func__,
			  transport, reason,
			  transport->cwnd, transport->ssthresh);
}

/* Apply Max.Burst limit to the congestion window:
 * sctpimpguide-05 2.14.2
 * D) When the time comes for the sender to
 * transmit new DATA chunks, the protocol parameter Max.Burst MUST
 * first be applied to limit how many new DATA chunks may be sent.
 * The limit is applied by adjusting cwnd as follows:
 *	if ((flightsize + Max.Burst * MTU) < cwnd)
 *		cwnd = flightsize + Max.Burst * MTU
 */

void sctp_transport_burst_limited(struct sctp_transport *t)
{
	struct sctp_association *asoc = t->asoc;
	u32 old_cwnd = t->cwnd;
	u32 max_burst_bytes;

	if (t->burst_limited)
		return;

	max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu);
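	/* Illustrative numbers only: with max_burst == 4, a 1500-byte path
	 * MTU and an empty flight, cwnd is clamped to at most 6000 bytes
	 * for this burst.
	 */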
	if (max_burst_bytes < old_cwnd) {
		t->cwnd = max_burst_bytes;
		t->burst_limited = old_cwnd;
	}
}

/* Restore the old cwnd congestion window, after the burst had its
 * desired effect.
 */
void sctp_transport_burst_reset(struct sctp_transport *t)
{
	if (t->burst_limited) {
		t->cwnd = t->burst_limited;
		t->burst_limited = 0;
	}
}

/* What is the next timeout value for this transport? */
unsigned long sctp_transport_timeout(struct sctp_transport *t)
{
	unsigned long timeout;
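	/* RFC 4960 8.3 calls for jittering the heartbeat interval by +/- 50%
	 * of the RTO so that heartbeats to different destinations do not
	 * synchronize; sctp_jitter() provides the random offset added below.
	 */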
	timeout = t->rto + sctp_jitter(t->rto);
	if (t->state != SCTP_UNCONFIRMED)
		timeout += t->hbinterval;
	timeout += jiffies;
	return timeout;
}

/* Reset transport variables to their initial values */
void sctp_transport_reset(struct sctp_transport *t)
{
	struct sctp_association *asoc = t->asoc;

	/* RFC 2960 (bis), Section 5.2.4
	 * All the congestion control parameters (e.g., cwnd, ssthresh)
	 * related to this peer MUST be reset to their initial values
	 * (see Section 6.2.1)
	 */
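	/* Initial cwnd per RFC 4960 7.2.1: min(4*MTU, max(2*MTU, 4380 bytes)). */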
	t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
	t->burst_limited = 0;
	t->ssthresh = asoc->peer.i.a_rwnd;
	t->rto = asoc->rto_initial;
	t->rtt = 0;
	t->srtt = 0;
	t->rttvar = 0;

	/* Reset these additional variables so that we have a clean
	 * slate.
	 */
	t->partial_bytes_acked = 0;
	t->flight_size = 0;
	t->error_count = 0;
	t->rto_pending = 0;
	t->hb_sent = 0;

	/* Initialize the state information for SFR-CACC */
	t->cacc.changeover_active = 0;
	t->cacc.cycling_changeover = 0;
	t->cacc.next_tsn_at_change = 0;
	t->cacc.cacc_saw_newack = 0;
}