/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 International Business Machines, Corp.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions handle all input from the IP layer into SCTP.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 *    Xingang Guo <xingang.guo@intel.com>
 *    Jon Grimm <jgrimm@us.ibm.com>
 *    Hui Huang <hui.huang@nokia.com>
 *    Daisy Chang <daisyc@us.ibm.com>
 *    Sridhar Samudrala <sri@us.ibm.com>
 *    Ardelle Fan <ardelle.fan@intel.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#include <linux/types.h>
#include <linux/list.h> /* For struct list_head */
#include <linux/socket.h>
#include <linux/ip.h>
#include <linux/time.h> /* For struct timeval */
#include <linux/slab.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/snmp.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/checksum.h>
#include <net/net_namespace.h>

/* Forward declarations for internal helpers. */
static int sctp_rcv_ootb(struct sk_buff *);
static struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb,
				      const union sctp_addr *laddr,
				      const union sctp_addr *paddr,
				      struct sctp_transport **transportp);
static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr);
static struct sctp_association *__sctp_lookup_association(
					const union sctp_addr *local,
					const union sctp_addr *peer,
					struct sctp_transport **pt);

static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);


/* Calculate the SCTP checksum of an SCTP packet.  */
static inline int sctp_rcv_checksum(struct sk_buff *skb)
{
	struct sctphdr *sh = sctp_hdr(skb);
	__le32 cmp = sh->checksum;
	struct sk_buff *list;
	__le32 val;
	__u32 tmp = sctp_start_cksum((__u8 *)sh, skb_headlen(skb));

	skb_walk_frags(skb, list)
		tmp = sctp_update_cksum((__u8 *)list->data, skb_headlen(list),
					tmp);

	val = sctp_end_cksum(tmp);

	if (val != cmp) {
		/* CRC failure, dump it. */
		SCTP_INC_STATS_BH(SCTP_MIB_CHECKSUMERRORS);
		return -1;
	}
	return 0;
}
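/* Note: sctp_start_cksum()/sctp_update_cksum()/sctp_end_cksum() come from
 * <net/sctp/checksum.h> and compute the CRC32c checksum mandated by
 * RFC 4960 (originally introduced by RFC 3309).  The skb_walk_frags()
 * loop above makes sure the fragments of a non-linear skb are included
 * in the sum, not just the linear head.
 */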

struct sctp_input_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;
	struct sctp_chunk *chunk;
};
#define SCTP_INPUT_CB(__skb)	((struct sctp_input_cb *)&((__skb)->cb[0]))
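/* The layout above must fit inside skb->cb[] (48 bytes).  sctp_rcv() stores
 * the chunk pointer here and sctp_backlog_rcv() reads it back, e.g.:
 *
 *	SCTP_INPUT_CB(skb)->chunk = chunk;
 *	...
 *	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
 */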

/*
 * This is the routine which IP calls when receiving an SCTP packet.
 */
int sctp_rcv(struct sk_buff *skb)
{
	struct sock *sk;
	struct sctp_association *asoc;
	struct sctp_endpoint *ep = NULL;
	struct sctp_ep_common *rcvr;
	struct sctp_transport *transport = NULL;
	struct sctp_chunk *chunk;
	struct sctphdr *sh;
	union sctp_addr src;
	union sctp_addr dest;
	int family;
	struct sctp_af *af;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	SCTP_INC_STATS_BH(SCTP_MIB_INSCTPPACKS);

	if (skb_linearize(skb))
		goto discard_it;

	sh = sctp_hdr(skb);

	/* Pull up the IP and SCTP headers. */
	__skb_pull(skb, skb_transport_offset(skb));
	if (skb->len < sizeof(struct sctphdr))
		goto discard_it;
	if (!sctp_checksum_disable && !skb_csum_unnecessary(skb) &&
		  sctp_rcv_checksum(skb) < 0)
		goto discard_it;

	skb_pull(skb, sizeof(struct sctphdr));

	/* Make sure we at least have chunk headers worth of data left. */
	if (skb->len < sizeof(struct sctp_chunkhdr))
		goto discard_it;

	family = ipver2af(ip_hdr(skb)->version);
	af = sctp_get_af_specific(family);
	if (unlikely(!af))
		goto discard_it;

	/* Initialize local addresses for lookups. */
	af->from_skb(&src, skb, 1);
	af->from_skb(&dest, skb, 0);

	/* If the packet is to or from a non-unicast address,
	 * silently discard the packet.
	 *
	 * This is not clearly defined in the RFC except in section
	 * 8.4 - OOTB handling.  However, based on the book "Stream Control
	 * Transmission Protocol" 2.1, "It is important to note that the
	 * IP address of an SCTP transport address must be a routable
	 * unicast address.  In other words, IP multicast addresses and
	 * IP broadcast addresses cannot be used in an SCTP transport
	 * address."
	 */
	if (!af->addr_valid(&src, NULL, skb) ||
	    !af->addr_valid(&dest, NULL, skb))
		goto discard_it;

	asoc = __sctp_rcv_lookup(skb, &src, &dest, &transport);

	if (!asoc)
		ep = __sctp_rcv_lookup_endpoint(&dest);

	/* Retrieve the common input handling substructure. */
	rcvr = asoc ? &asoc->base : &ep->base;
	sk = rcvr->sk;

	/*
	 * If a frame arrives on an interface and the receiving socket is
	 * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB
	 */
	if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb)))
	{
		if (asoc) {
			sctp_association_put(asoc);
			asoc = NULL;
		} else {
			sctp_endpoint_put(ep);
			ep = NULL;
		}
		sk = sctp_get_ctl_sock();
		ep = sctp_sk(sk)->ep;
		sctp_endpoint_hold(ep);
		rcvr = &ep->base;
	}

	/*
	 * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
	 * An SCTP packet is called an "out of the blue" (OOTB)
	 * packet if it is correctly formed, i.e., passed the
	 * receiver's checksum check, but the receiver is not
	 * able to identify the association to which this
	 * packet belongs.
	 */
	if (!asoc) {
		if (sctp_rcv_ootb(skb)) {
			SCTP_INC_STATS_BH(SCTP_MIB_OUTOFBLUES);
			goto discard_release;
		}
	}

	if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
		goto discard_release;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_release;

	/* Create an SCTP packet structure. */
	chunk = sctp_chunkify(skb, asoc, sk);
	if (!chunk)
		goto discard_release;
	SCTP_INPUT_CB(skb)->chunk = chunk;

	/* Remember what endpoint is to handle this packet. */
	chunk->rcvr = rcvr;

	/* Remember the SCTP header. */
	chunk->sctp_hdr = sh;

	/* Set the source and destination addresses of the incoming chunk.  */
	sctp_init_addrs(chunk, &src, &dest);

	/* Remember where we came from.  */
	chunk->transport = transport;

	/* Acquire access to the sock lock. Note: We are safe from other
	 * bottom halves on this lock, but a user may be in the lock too,
	 * so check if it is busy.
	 */
	sctp_bh_lock_sock(sk);

	if (sk != rcvr->sk) {
		/* Our cached sk is different from the rcvr->sk.  This is
		 * because migrate()/accept() may have moved the association
		 * to a new socket and released all the sockets.  So now we
		 * are holding a lock on the old socket while the user may
		 * be doing something with the new socket.  Switch our view
		 * of the current sk.
		 */
		sctp_bh_unlock_sock(sk);
		sk = rcvr->sk;
		sctp_bh_lock_sock(sk);
	}

	if (sock_owned_by_user(sk)) {
		if (sctp_add_backlog(sk, skb)) {
			sctp_bh_unlock_sock(sk);
			sctp_chunk_free(chunk);
			skb = NULL; /* sctp_chunk_free already freed the skb */
			goto discard_release;
		}
		SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
	} else {
		SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ);
		sctp_inq_push(&chunk->rcvr->inqueue, chunk);
	}

	sctp_bh_unlock_sock(sk);

	/* Release the asoc/ep ref we took in the lookup calls. */
	if (asoc)
		sctp_association_put(asoc);
	else
		sctp_endpoint_put(ep);

	return 0;

discard_it:
	SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_DISCARDS);
	kfree_skb(skb);
	return 0;

discard_release:
	/* Release the asoc/ep ref we took in the lookup calls. */
	if (asoc)
		sctp_association_put(asoc);
	else
		sctp_endpoint_put(ep);

	goto discard_it;
}

/* Process the backlog queue of the socket.  Every skb on
 * the backlog holds a ref on an association or endpoint.
 * We hold this ref throughout the state machine to make
 * sure that the structure we need is still around.
 */
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
	struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
	struct sctp_ep_common *rcvr = NULL;
	int backloged = 0;

	rcvr = chunk->rcvr;

	/* If the rcvr is dead then the association or endpoint
	 * has been deleted and we can safely drop the chunk
	 * and refs that we are holding.
	 */
	if (rcvr->dead) {
		sctp_chunk_free(chunk);
		goto done;
	}

	if (unlikely(rcvr->sk != sk)) {
		/* In this case, the association moved from one socket to
		 * another.  We are currently sitting on the backlog of the
		 * old socket, so we need to move.
		 * However, since we are here in the process context we
		 * need to make sure that the user doesn't own
		 * the new socket when we process the packet.
		 * If the new socket is user-owned, queue the chunk to the
		 * backlog of the new socket without dropping any refs.
		 * Otherwise, we can safely push the chunk on the inqueue.
		 */

		sk = rcvr->sk;
		sctp_bh_lock_sock(sk);

		if (sock_owned_by_user(sk)) {
			if (sk_add_backlog(sk, skb))
				sctp_chunk_free(chunk);
			else
				backloged = 1;
		} else
			sctp_inq_push(inqueue, chunk);

		sctp_bh_unlock_sock(sk);

		/* If the chunk was backloged again, don't drop refs */
		if (backloged)
			return 0;
	} else {
		sctp_inq_push(inqueue, chunk);
	}

done:
	/* Release the refs we took in sctp_add_backlog */
	if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
		sctp_association_put(sctp_assoc(rcvr));
	else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
		sctp_endpoint_put(sctp_ep(rcvr));
	else
		BUG();

	return 0;
}

static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
	struct sctp_ep_common *rcvr = chunk->rcvr;
	int ret;

	ret = sk_add_backlog(sk, skb);
	if (!ret) {
		/* Hold the assoc/ep while hanging on the backlog queue.
		 * This way, we know structures we need will not disappear
		 * from us.
		 */
		if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
			sctp_association_hold(sctp_assoc(rcvr));
		else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
			sctp_endpoint_hold(sctp_ep(rcvr));
		else
			BUG();
	}
	return ret;

}

/* Handle icmp frag needed error. */
void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
			   struct sctp_transport *t, __u32 pmtu)
{
	if (!t || (t->pathmtu <= pmtu))
		return;

	if (sock_owned_by_user(sk)) {
		asoc->pmtu_pending = 1;
		t->pmtu_pending = 1;
		return;
	}

	if (t->param_flags & SPP_PMTUD_ENABLE) {
		/* Update the transport's view of the MTU. */
		sctp_transport_update_pmtu(t, pmtu);

		/* Update association pmtu. */
		sctp_assoc_sync_pmtu(asoc);
	}

	/* Retransmit with the new pmtu setting.
	 * Normally, if PMTU discovery is disabled, an ICMP Fragmentation
	 * Needed will never be sent, but if a message was sent before
	 * PMTU discovery was disabled that was larger than the PMTU, it
	 * would not be fragmented, so it must be re-transmitted fragmented.
	 */
	sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
}

/*
 * SCTP Implementer's Guide, 2.37 ICMP handling procedures
 *
 * ICMP8) If the ICMP code is an "Unrecognized next header type encountered"
 *        or a "Protocol Unreachable" treat this message as an abort
 *        with the T bit set.
 *
 * This function sends an event to the state machine, which will abort the
 * association.
 *
 */
void sctp_icmp_proto_unreachable(struct sock *sk,
			   struct sctp_association *asoc,
			   struct sctp_transport *t)
{
	SCTP_DEBUG_PRINTK("%s\n", __func__);

	if (sock_owned_by_user(sk)) {
		if (timer_pending(&t->proto_unreach_timer))
			return;
		else {
			if (!mod_timer(&t->proto_unreach_timer,
						jiffies + (HZ/20)))
				sctp_association_hold(asoc);
		}

	} else {
		if (timer_pending(&t->proto_unreach_timer) &&
		    del_timer(&t->proto_unreach_timer))
			sctp_association_put(asoc);

		sctp_do_sm(SCTP_EVENT_T_OTHER,
			   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
			   asoc->state, asoc->ep, asoc, t,
			   GFP_ATOMIC);
	}
}

/* Common lookup code for icmp/icmpv6 error handler.
 * On success, the matching association is returned through *app with a
 * reference held and its socket bh-locked; the caller must release both
 * via sctp_err_finish().
 */
struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
			     struct sctphdr *sctphdr,
			     struct sctp_association **app,
			     struct sctp_transport **tpp)
{
	union sctp_addr saddr;
	union sctp_addr daddr;
	struct sctp_af *af;
	struct sock *sk = NULL;
	struct sctp_association *asoc;
	struct sctp_transport *transport = NULL;
	struct sctp_init_chunk *chunkhdr;
	__u32 vtag = ntohl(sctphdr->vtag);
	int len = skb->len - ((void *)sctphdr - (void *)skb->data);

	*app = NULL; *tpp = NULL;

	af = sctp_get_af_specific(family);
	if (unlikely(!af)) {
		return NULL;
	}

	/* Initialize local addresses for lookups. */
	af->from_skb(&saddr, skb, 1);
	af->from_skb(&daddr, skb, 0);

	/* Look for an association that matches the incoming ICMP error
	 * packet.
	 */
	asoc = __sctp_lookup_association(&saddr, &daddr, &transport);
	if (!asoc)
		return NULL;

	sk = asoc->base.sk;

	/* RFC 4960, Appendix C. ICMP Handling
	 *
	 * ICMP6) An implementation MUST validate that the Verification Tag
	 * contained in the ICMP message matches the Verification Tag of
	 * the peer.  If the Verification Tag is not 0 and does NOT
	 * match, discard the ICMP message.  If it is 0 and the ICMP
	 * message contains enough bytes to verify that the chunk type is
	 * an INIT chunk and that the Initiate Tag matches the tag of the
	 * peer, continue with ICMP7.  If the ICMP message is too short
	 * or the chunk type or the Initiate Tag does not match, silently
	 * discard the packet.
	 */
	if (vtag == 0) {
		chunkhdr = (void *)sctphdr + sizeof(struct sctphdr);
		if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t)
			  + sizeof(__be32) ||
		    chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
		    ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) {
			goto out;
		}
	} else if (vtag != asoc->c.peer_vtag) {
		goto out;
	}

	sctp_bh_lock_sock(sk);

	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(&init_net, LINUX_MIB_LOCKDROPPEDICMPS);

	*app = asoc;
	*tpp = transport;
	return sk;

out:
	if (asoc)
		sctp_association_put(asoc);
	return NULL;
}

/* Common cleanup code for icmp/icmpv6 error handler. */
void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
{
	sctp_bh_unlock_sock(sk);
	if (asoc)
		sctp_association_put(asoc);
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the sctp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */
void sctp_v4_err(struct sk_buff *skb, __u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	const int ihlen = iph->ihl * 4;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	struct sctp_association *asoc = NULL;
	struct sctp_transport *transport;
	struct inet_sock *inet;
	sk_buff_data_t saveip, savesctp;
	int err;

	if (skb->len < ihlen + 8) {
		ICMP_INC_STATS_BH(&init_net, ICMP_MIB_INERRORS);
		return;
	}

	/* Fix up skb to look at the embedded net header. */
	saveip = skb->network_header;
	savesctp = skb->transport_header;
	skb_reset_network_header(skb);
	skb_set_transport_header(skb, ihlen);
	sk = sctp_err_lookup(AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
	/* Put back the original values. */
	skb->network_header = saveip;
	skb->transport_header = savesctp;
	if (!sk) {
		ICMP_INC_STATS_BH(&init_net, ICMP_MIB_INERRORS);
		return;
	}
	/* Warning:  The sock lock is held.  Remember to call
	 * sctp_err_finish!
	 */

	switch (type) {
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out_unlock;

		/* PMTU discovery (RFC1191) */
		if (ICMP_FRAG_NEEDED == code) {
			sctp_icmp_frag_needed(sk, asoc, transport, info);
			goto out_unlock;
		} else {
			if (ICMP_PROT_UNREACH == code) {
				sctp_icmp_proto_unreachable(sk, asoc,
							    transport);
				goto out_unlock;
			}
		}
		err = icmp_err_convert[code].errno;
		break;
	case ICMP_TIME_EXCEEDED:
		/* Ignore any time exceeded errors due to fragment reassembly
		 * timeouts.
		 */
		if (ICMP_EXC_FRAGTIME == code)
			goto out_unlock;

		err = EHOSTUNREACH;
		break;
	default:
		goto out_unlock;
	}

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else {  /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out_unlock:
	sctp_err_finish(sk, asoc);
}

/*
 * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
 *
 * This function scans all the chunks in the OOTB packet to determine if
 * the packet should be discarded right away.  If a response might be needed
 * for this packet, or, if further processing is possible, the packet will
 * be queued to a proper inqueue for the next phase of handling.
 *
 * Output:
 * Return 0 - If further processing is needed.
 * Return 1 - If the packet can be discarded right away.
 */
static int sctp_rcv_ootb(struct sk_buff *skb)
{
	sctp_chunkhdr_t *ch;
	__u8 *ch_end;

	ch = (sctp_chunkhdr_t *) skb->data;

	/* Scan through all the chunks in the packet.  */
	do {
		/* Break out if chunk length is less than minimal. */
		if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
			break;

		ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
		if (ch_end > skb_tail_pointer(skb))
			break;

		/* RFC 2960 8.4, 2) If the OOTB packet contains an ABORT chunk, the
		 * receiver MUST silently discard the OOTB packet and take no
		 * further action.
		 */
		if (SCTP_CID_ABORT == ch->type)
			goto discard;

		/* RFC 2960 8.4, 6) If the packet contains a SHUTDOWN COMPLETE
		 * chunk, the receiver should silently discard the packet
		 * and take no further action.
		 */
		if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type)
			goto discard;

		/* RFC 4460, 2.11.2
		 * This will discard packets with INIT chunk bundled as
		 * subsequent chunks in the packet.  When INIT is first,
		 * the normal INIT processing will discard the chunk.
		 */
		if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
			goto discard;

		ch = (sctp_chunkhdr_t *) ch_end;
	} while (ch_end < skb_tail_pointer(skb));

	return 0;

discard:
	return 1;
}

/* Insert endpoint into the hash table.  */
static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
{
	struct sctp_ep_common *epb;
	struct sctp_hashbucket *head;

	epb = &ep->base;

	epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
	head = &sctp_ep_hashtable[epb->hashent];

	sctp_write_lock(&head->lock);
	hlist_add_head(&epb->node, &head->chain);
	sctp_write_unlock(&head->lock);
}

/* Add an endpoint to the hash. Local BH-safe. */
void sctp_hash_endpoint(struct sctp_endpoint *ep)
{
	sctp_local_bh_disable();
	__sctp_hash_endpoint(ep);
	sctp_local_bh_enable();
}

/* Remove endpoint from the hash table.  */
static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
{
	struct sctp_hashbucket *head;
	struct sctp_ep_common *epb;

	epb = &ep->base;

	if (hlist_unhashed(&epb->node))
		return;

	epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);

	head = &sctp_ep_hashtable[epb->hashent];

	sctp_write_lock(&head->lock);
	__hlist_del(&epb->node);
	sctp_write_unlock(&head->lock);
}

/* Remove endpoint from the hash.  Local BH-safe. */
void sctp_unhash_endpoint(struct sctp_endpoint *ep)
{
	sctp_local_bh_disable();
	__sctp_unhash_endpoint(ep);
	sctp_local_bh_enable();
}

/* Look up an endpoint. */
static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr)
{
	struct sctp_hashbucket *head;
	struct sctp_ep_common *epb;
	struct sctp_endpoint *ep;
	struct hlist_node *node;
	int hash;

	hash = sctp_ep_hashfn(ntohs(laddr->v4.sin_port));
	head = &sctp_ep_hashtable[hash];
	read_lock(&head->lock);
	sctp_for_each_hentry(epb, node, &head->chain) {
		ep = sctp_ep(epb);
		if (sctp_endpoint_is_match(ep, laddr))
			goto hit;
	}

	ep = sctp_sk((sctp_get_ctl_sock()))->ep;

hit:
	sctp_endpoint_hold(ep);
	read_unlock(&head->lock);
	return ep;
}
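/* Note that the lookup above falls back to the control socket's endpoint
 * when no bound endpoint matches, so it always returns a non-NULL endpoint
 * with a reference held; packets that match nothing are then treated as
 * OOTB against that control endpoint in sctp_rcv().
 */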

/* Insert association into the hash table.  */
static void __sctp_hash_established(struct sctp_association *asoc)
{
	struct sctp_ep_common *epb;
	struct sctp_hashbucket *head;

	epb = &asoc->base;

	/* Calculate which chain this entry will belong to. */
	epb->hashent = sctp_assoc_hashfn(epb->bind_addr.port, asoc->peer.port);

	head = &sctp_assoc_hashtable[epb->hashent];

	sctp_write_lock(&head->lock);
	hlist_add_head(&epb->node, &head->chain);
	sctp_write_unlock(&head->lock);
}

/* Add an association to the hash. Local BH-safe. */
void sctp_hash_established(struct sctp_association *asoc)
{
	if (asoc->temp)
		return;

	sctp_local_bh_disable();
	__sctp_hash_established(asoc);
	sctp_local_bh_enable();
}

/* Remove association from the hash table.  */
static void __sctp_unhash_established(struct sctp_association *asoc)
{
	struct sctp_hashbucket *head;
	struct sctp_ep_common *epb;

	epb = &asoc->base;

	epb->hashent = sctp_assoc_hashfn(epb->bind_addr.port,
					 asoc->peer.port);

	head = &sctp_assoc_hashtable[epb->hashent];

	sctp_write_lock(&head->lock);
	__hlist_del(&epb->node);
	sctp_write_unlock(&head->lock);
}

/* Remove association from the hash table.  Local BH-safe. */
void sctp_unhash_established(struct sctp_association *asoc)
{
	if (asoc->temp)
		return;

	sctp_local_bh_disable();
	__sctp_unhash_established(asoc);
	sctp_local_bh_enable();
}

/* Look up an association. */
static struct sctp_association *__sctp_lookup_association(
					const union sctp_addr *local,
					const union sctp_addr *peer,
					struct sctp_transport **pt)
{
	struct sctp_hashbucket *head;
	struct sctp_ep_common *epb;
	struct sctp_association *asoc;
	struct sctp_transport *transport;
	struct hlist_node *node;
	int hash;

	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyway.
	 */
	hash = sctp_assoc_hashfn(ntohs(local->v4.sin_port), ntohs(peer->v4.sin_port));
	head = &sctp_assoc_hashtable[hash];
	read_lock(&head->lock);
	sctp_for_each_hentry(epb, node, &head->chain) {
		asoc = sctp_assoc(epb);
		transport = sctp_assoc_is_match(asoc, local, peer);
		if (transport)
			goto hit;
	}

	read_unlock(&head->lock);

	return NULL;

hit:
	*pt = transport;
	sctp_association_hold(asoc);
	read_unlock(&head->lock);
	return asoc;
}

/* Look up an association. BH-safe. */
SCTP_STATIC
struct sctp_association *sctp_lookup_association(const union sctp_addr *laddr,
						 const union sctp_addr *paddr,
					    struct sctp_transport **transportp)
{
	struct sctp_association *asoc;

	sctp_local_bh_disable();
	asoc = __sctp_lookup_association(laddr, paddr, transportp);
	sctp_local_bh_enable();

	return asoc;
}

/* Is there an association matching the given local and peer addresses? */
int sctp_has_association(const union sctp_addr *laddr,
			 const union sctp_addr *paddr)
{
	struct sctp_association *asoc;
	struct sctp_transport *transport;

	if ((asoc = sctp_lookup_association(laddr, paddr, &transport))) {
		sctp_association_put(asoc);
		return 1;
	}

	return 0;
}

/*
 * SCTP Implementors Guide, 2.18 Handling of address
 * parameters within the INIT or INIT-ACK.
 *
 * D) When searching for a matching TCB upon reception of an INIT
 *    or INIT-ACK chunk the receiver SHOULD use not only the
 *    source address of the packet (containing the INIT or
 *    INIT-ACK) but the receiver SHOULD also use all valid
 *    address parameters contained within the chunk.
 *
 * 2.18.3 Solution description
 *
 * This new text clearly specifies to an implementor the need
 * to look within the INIT or INIT-ACK. Any implementation that
 * does not do this, may not be able to establish associations
 * in certain circumstances.
 *
 */
static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb,
	const union sctp_addr *laddr, struct sctp_transport **transportp)
{
	struct sctp_association *asoc;
	union sctp_addr addr;
	union sctp_addr *paddr = &addr;
	struct sctphdr *sh = sctp_hdr(skb);
	union sctp_params params;
	sctp_init_chunk_t *init;
	struct sctp_transport *transport;
	struct sctp_af *af;

	/*
	 * This code will NOT touch anything inside the chunk--it is
	 * strictly READ-ONLY.
	 *
	 * RFC 2960 3  SCTP packet Format
	 *
	 * Multiple chunks can be bundled into one SCTP packet up to
	 * the MTU size, except for the INIT, INIT ACK, and SHUTDOWN
	 * COMPLETE chunks.  These chunks MUST NOT be bundled with any
	 * other chunk in a packet.  See Section 6.10 for more details
	 * on chunk bundling.
	 */

	/* Find the start of the TLVs and the end of the chunk.  This is
	 * the region we search for address parameters.
	 */
	init = (sctp_init_chunk_t *)skb->data;

	/* Walk the parameters looking for embedded addresses. */
	sctp_walk_params(params, init, init_hdr.params) {

		/* Note: Ignoring hostname addresses. */
		af = sctp_get_af_specific(param_type2af(params.p->type));
		if (!af)
			continue;

		af->from_addr_param(paddr, params.addr, sh->source, 0);

		asoc = __sctp_lookup_association(laddr, paddr, &transport);
		if (asoc)
			return asoc;
	}

	return NULL;
}

/* ADD-IP, Section 5.2
 * When an endpoint receives an ASCONF Chunk from the remote peer
 * special procedures may be needed to identify the association the
 * ASCONF Chunk is associated with. To properly find the association
 * the following procedures SHOULD be followed:
 *
 * D2) If the association is not found, use the address found in the
 * Address Parameter TLV combined with the port number found in the
 * SCTP common header. If found proceed to rule D4.
 *
 * D2-ext) If more than one ASCONF Chunks are packed together, use the
 * address found in the ASCONF Address Parameter TLV of each of the
 * subsequent ASCONF Chunks. If found, proceed to rule D4.
 */
static struct sctp_association *__sctp_rcv_asconf_lookup(
					sctp_chunkhdr_t *ch,
					const union sctp_addr *laddr,
					__be16 peer_port,
					struct sctp_transport **transportp)
{
	sctp_addip_chunk_t *asconf = (struct sctp_addip_chunk *)ch;
	struct sctp_af *af;
	union sctp_addr_param *param;
	union sctp_addr paddr;

	/* Skip over the ADDIP header and find the Address parameter */
	param = (union sctp_addr_param *)(asconf + 1);

	af = sctp_get_af_specific(param_type2af(param->p.type));
	if (unlikely(!af))
		return NULL;

	af->from_addr_param(&paddr, param, peer_port, 0);

	return __sctp_lookup_association(laddr, &paddr, transportp);
}


/* SCTP-AUTH, Section 6.3:
 *    If the receiver does not find a STCB for a packet containing an AUTH
 *    chunk as the first chunk and not a COOKIE-ECHO chunk as the second
 *    chunk, it MUST use the chunks after the AUTH chunk to look up an existing
 *    association.
 *
 * This means that any chunks that can help us identify the association need
 * to be looked at to find this association.
 */
static struct sctp_association *__sctp_rcv_walk_lookup(struct sk_buff *skb,
				      const union sctp_addr *laddr,
				      struct sctp_transport **transportp)
{
	struct sctp_association *asoc = NULL;
	sctp_chunkhdr_t *ch;
	int have_auth = 0;
	unsigned int chunk_num = 1;
	__u8 *ch_end;

	/* Walk through the chunks looking for AUTH or ASCONF chunks
	 * to help us find the association.
	 */
	ch = (sctp_chunkhdr_t *) skb->data;
	do {
		/* Break out if chunk length is less than minimal. */
		if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
			break;

		ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
		if (ch_end > skb_tail_pointer(skb))
			break;

		switch (ch->type) {
		    case SCTP_CID_AUTH:
			    have_auth = chunk_num;
			    break;

		    case SCTP_CID_COOKIE_ECHO:
			    /* If a packet arrives containing an AUTH chunk as
			     * a first chunk, a COOKIE-ECHO chunk as the second
			     * chunk, and possibly more chunks after them, and
			     * the receiver does not have an STCB for that
			     * packet, then authentication is based on
			     * the contents of the COOKIE-ECHO chunk.
			     */
			    if (have_auth == 1 && chunk_num == 2)
				    return NULL;
			    break;

		    case SCTP_CID_ASCONF:
			    if (have_auth || sctp_addip_noauth)
				    asoc = __sctp_rcv_asconf_lookup(ch, laddr,
							sctp_hdr(skb)->source,
							transportp);
		    default:
			    break;
		}

		if (asoc)
			break;

		ch = (sctp_chunkhdr_t *) ch_end;
		chunk_num++;
	} while (ch_end < skb_tail_pointer(skb));

	return asoc;
}

/*
 * There are circumstances when we need to look inside the SCTP packet
 * for information to help us find the association.  Examples
 * include looking inside of INIT/INIT-ACK chunks or after the AUTH
 * chunks.
 */
static struct sctp_association *__sctp_rcv_lookup_harder(struct sk_buff *skb,
				      const union sctp_addr *laddr,
				      struct sctp_transport **transportp)
{
	sctp_chunkhdr_t *ch;

	ch = (sctp_chunkhdr_t *) skb->data;

	/* The code below will attempt to walk the chunk and extract
	 * parameter information.  Before we do that, we need to verify
	 * that the chunk length doesn't cause overflow.  Otherwise, we'll
	 * walk off the end.
	 */
	if (WORD_ROUND(ntohs(ch->length)) > skb->len)
		return NULL;

	/* If this is INIT/INIT-ACK look inside the chunk too. */
	switch (ch->type) {
	case SCTP_CID_INIT:
	case SCTP_CID_INIT_ACK:
		return __sctp_rcv_init_lookup(skb, laddr, transportp);
		break;

	default:
		return __sctp_rcv_walk_lookup(skb, laddr, transportp);
		break;
	}


	return NULL;
}

/* Lookup an association for an inbound skb. */
static struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb,
				      const union sctp_addr *paddr,
				      const union sctp_addr *laddr,
				      struct sctp_transport **transportp)
{
	struct sctp_association *asoc;

	asoc = __sctp_lookup_association(laddr, paddr, transportp);

	/* Further lookup for INIT/INIT-ACK packets.
	 * SCTP Implementors Guide, 2.18 Handling of address
	 * parameters within the INIT or INIT-ACK.
	 */
	if (!asoc)
		asoc = __sctp_rcv_lookup_harder(skb, laddr, transportp);

	return asoc;
}