/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers.  */
static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					      struct sctp_ulpevent *);
static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *,
					      struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);

/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode  = 0;
	ulpq->malloced = 0;

	return ulpq;
}


/* Flush the reassembly and ordering queues.  */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

}

/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
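	/* Free the structure itself only when the owner marked it as
	 * separately allocated via the malloced flag.
	 */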
	if (ulpq->malloced)
		kfree(ulpq);
}

/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	sctp_data_chunk_t *hdr;
	struct sctp_ulpevent *event;

	hdr = (sctp_data_chunk_t *) chunk->chunk_hdr;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	/* Do reassembly if needed.  */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed.  */
	if ((event) && (event->msg_flags & MSG_EOR)){
		/* Create a temporary list to collect chunks on.  */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

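		/* Queueing the skb on 'temp' links it to a list head, which
		 * lets sctp_ulpq_order() gather any further events that
		 * become deliverable, so the whole batch reaches the ULP
		 * together.
		 */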
		event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event)
		sctp_ulpq_tail_event(ulpq, event);

	return 0;
}

/* Add a new event for propagation to the ULP.  */
/* Clear the partial delivery mode for this socket.   Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_sock *sp = sctp_sk(sk);

	if (atomic_dec_and_test(&sp->pd_mode)) {
		/* This means there are no other associations in PD, so
		 * we can go ahead and clear out the lobby in one shot
		 */
		if (!skb_queue_empty(&sp->pd_lobby)) {
			struct list_head *list;
			sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
			list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
			INIT_LIST_HEAD(list);
			return 1;
		}
	} else {
		/* There are other associations in PD, so we only need to
		 * pull stuff out of the lobby that belongs to the
		 * association that is exiting PD (all of its notifications
		 * are posted here).
		 */
		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
			struct sk_buff *skb, *tmp;
			struct sctp_ulpevent *event;

			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
				event = sctp_skb2event(skb);
				if (event->asoc == asoc) {
					__skb_unlink(skb, &sp->pd_lobby);
					__skb_queue_tail(&sk->sk_receive_queue,
							 skb);
				}
			}
		}
	}

	return 0;
}

/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

	atomic_inc(&sp->pd_mode);
	ulpq->pd_mode = 1;
}
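
/* Partial delivery is tracked at two levels: the socket keeps an atomic
 * count of associations currently in PD (sp->pd_mode), while ulpq->pd_mode
 * flags whether this particular association is the one doing partial
 * delivery.
 */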

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	sctp_ulpq_reasm_drain(ulpq);
	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}

/* If the SKB of 'event' is on a list, it is the first such member
 * of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sk_buff_head *queue, *skb_list;
	struct sk_buff *skb = sctp_event2skb(event);
	int clear_pd = 0;

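	/* When the event's skb was placed on a temporary list, its prev
	 * pointer points back at that list's sk_buff_head; use it to
	 * recover the list holding all skbs gathered for this delivery.
	 */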
	skb_list = (struct sk_buff_head *) skb->prev;

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
		goto out_free;

	/* Check if the user wishes to receive this event.  */
	if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course, _this_
	 * association is the cause of the partial delivery.
	 */

	if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
		queue = &sk->sk_receive_queue;
	} else {
		if (ulpq->pd_mode) {
			/* If the association is in partial delivery, we
			 * need to finish delivering the partially processed
			 * packet before passing any other data.  This is
			 * because we don't truly support stream interleaving.
			 */
			if ((event->msg_flags & MSG_NOTIFICATION) ||
			    (SCTP_DATA_NOT_FRAG ==
				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
				queue = &sctp_sk(sk)->pd_lobby;
			else {
				clear_pd = event->msg_flags & MSG_EOR;
				queue = &sk->sk_receive_queue;
			}
		} else {
			/*
			 * If fragment interleave is enabled, we
			 * can queue this to the receive queue instead
			 * of the lobby.
			 */
			if (sctp_sk(sk)->frag_interleave)
				queue = &sk->sk_receive_queue;
			else
				queue = &sctp_sk(sk)->pd_lobby;
		}
	}

	/* If we are harvesting multiple skbs they will be
	 * collected on a list.
	 */
	if (skb_list)
		sctp_skb_list_tail(skb_list, queue);
	else
		__skb_queue_tail(queue, skb);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue)
		sk->sk_data_ready(sk, 0);
	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}

/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled.  */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
					 struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list. We store them by TSN.  */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos. */
	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->reasm);

}

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skbs
 * as stored in the reassembly queue. The skbs may be non-linear if the SCTP
 * payload was fragmented on the way and IP had to reassemble them.
 * We add the rest of the skbs to the first skb's fraglist.
 */
static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
	for (last = list; list; last = list, list = list->next);

	/* Add the list of remaining fragments to the first fragments
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			sctp_skb_set_owner_r(new, f_frag->sk);

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue.  */
	__skb_unlink(f_frag, queue);

	/* if we did unshare, then free the old skb and re-assign */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

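	/* Walk the chained fragments, folding their lengths into the head
	 * skb so it reports the full message size; the payload itself
	 * stays in the frag_list (the skb becomes non-linear).
	 */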
	while (pos) {

		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue.  */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment.  */
		if (pos == l_frag)
			break;
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);

	return event;
}


/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in a SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	size_t pd_len = 0;
	struct sctp_association *asoc;
	u32 pd_point;

	/* Initialized to 0 just to avoid compiler warning message.  Will
	 * never be used with this value. It is referenced only after it
	 * is set when we find the first fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram. Once these 2 fields are set
	 * we expect to find the remaining middle fragments and the last
	 * fragment in order. If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 *
	 * There is a potential to do partial delivery if the user sets
	 * the SCTP_PARTIAL_DELIVERY_POINT option. Let's count some things
	 * here to see if we can do PD.
	 */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* If this "FIRST_FRAG" is the first
			 * element in the queue, then count it towards
			 * possible PD.
			 */
			if (pos == ulpq->reasm.next) {
			    pd_first = pos;
			    pd_last = pos;
			    pd_len = pos->len;
			} else {
			    pd_first = NULL;
			    pd_last = NULL;
			    pd_len = 0;
			}

			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if ((first_frag) && (ctsn == next_tsn)) {
				next_tsn++;
				if (pd_first) {
				    pd_last = pos;
				    pd_len += pos->len;
				}
			} else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	asoc = ulpq->asoc;
	if (pd_first) {
		/* Make sure we can enter partial delivery.
		 * We can trigger partial delivery only if fragment
		 * interleave is set, or the socket is not already
		 * in partial delivery.
		 */
		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
			goto done;

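		/* If the user set a partial delivery point and the in-order
		 * run of fragments at the head of the queue already holds
		 * at least that many bytes, hand it to the ULP now instead
		 * of waiting for the last fragment.
		 */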
		cevent = sctp_skb2event(pd_first);
		pd_point = sctp_sk(asoc->base.sk)->pd_point;
		if (pd_point && pd_point <= pd_len) {
			retval = sctp_make_reassembled_event(&ulpq->reasm,
							     pd_first,
							     pd_last);
			if (retval)
				sctp_ulpq_set_pd(ulpq);
		}
	}
done:
	return retval;
found:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}

/* Retrieve the next set of fragments of a partial message. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

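	/* We are already in partial delivery here: the first fragment was
	 * handed up earlier, so the message continues with MIDDLE
	 * fragments and finishes at a LAST fragment.
	 */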
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn)
				next_tsn++;
			else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}


/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message.  */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}

/* Retrieve the first part (sequential fragments) for partial delivery.  */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
	return retval;
}

/*
 * Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *event;
	__u32 tsn;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		event = sctp_skb2event(pos);
		tsn = event->tsn;

		/* Since the entire message must be abandoned by the
		 * sender (item A3 in Section 3.5, RFC 3758), we can
		 * free all fragments on the list that are less than
		 * or equal to ctsn_point
		 */
		if (TSN_lte(tsn, fwd_tsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		} else
			break;
	}
}

/*
 * Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
	struct sctp_ulpevent *event = NULL;
	struct sk_buff_head temp;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
		/* Do ordering if needed.  */
		if ((event) && (event->msg_flags & MSG_EOR)){
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			event = sctp_ulpq_order(ulpq, event);
		}

		/* Send event to the ULP.  'event' is the
		 * sctp_ulpevent for the very first SKB on the 'temp' list.
		 */
		if (event)
			sctp_ulpq_tail_event(ulpq, event);
	}
}


/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
					      struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *in;
	__u16 sid, csid;
	__u16 ssn, cssn;

	sid = event->stream;
	ssn = event->ssn;
	in  = &ulpq->asoc->ssnmap->in;

	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

	/* We are holding the chunks by stream, by SSN.  */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(in, sid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(event_list, pos);
	}
}

/* Helper function to store chunks needing ordering.  */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
					   struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}


	/* Insert before pos. */
	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->lobby);

}

static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *in;

	/* Check if this message needs ordering.  */
	if (SCTP_DATA_UNORDERED & event->msg_flags)
		return event;

	/* Note: The stream ID must be verified before this routine.  */
	sid = event->stream;
	ssn = event->ssn;
	in  = &ulpq->asoc->ssnmap->in;

	/* Is this the expected SSN for this stream ID?  */
	if (ssn != sctp_ssn_peek(in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found.  */
	sctp_ssn_next(in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}

/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *in;
	struct sk_buff_head temp;
	struct sk_buff_head *lobby = &ulpq->lobby;
	__u16 csid, cssn;

	in  = &ulpq->asoc->ssnmap->in;

	/* We are holding the chunks by stream, by SSN.  */
	skb_queue_head_init(&temp);
	event = NULL;
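	/* After a FORWARD TSN has advanced this stream's expected SSN,
	 * everything sitting in the lobby for this stream with an SSN
	 * below the new expectation is deliverable; collect it on 'temp'.
	 */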
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		/* see if this ssn has been marked by skipping */
		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			/* Create a temporary list to collect chunks on.  */
			event = sctp_skb2event(pos);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(&temp, pos);
	}

	/* If we didn't reap any data, see if the next expected SSN
	 * is next on the queue and if so, use that.
	 */
	if (event == NULL && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
			sctp_ssn_next(in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event) {
		/* see if we have more ordered data that we can deliver */
		sctp_ulpq_retrieve_ordered(ulpq, event);
		sctp_ulpq_tail_event(ulpq, event);
	}
}

/* Skip over an SSN. This is used during the processing of a
 * Forward TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *in;

	/* Note: The stream ID must be verified before this routine.  */
	in  = &ulpq->asoc->ssnmap->in;

	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq, sid);
	return;
}

static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
		struct sk_buff_head *list, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn;
	struct sk_buff *skb;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

| Pavel Emelyanov | 16d14ef | 2007-10-23 20:30:25 -0700 | [diff] [blame] | 972 | 	while ((skb = __skb_dequeue_tail(list)) != NULL) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 973 | 		freed += skb_headlen(skb); | 
 | 974 | 		event = sctp_skb2event(skb); | 
 | 975 | 		tsn = event->tsn; | 
 | 976 |  | 
 | 977 | 		sctp_ulpevent_free(event); | 
 | 978 | 		sctp_tsnmap_renege(tsnmap, tsn); | 
 | 979 | 		if (freed >= needed) | 
 | 980 | 			return freed; | 
 | 981 | 	} | 
 | 982 |  | 
 | 983 | 	return freed; | 
 | 984 | } | 
 | 985 |  | 
| Pavel Emelyanov | 16d14ef | 2007-10-23 20:30:25 -0700 | [diff] [blame] | 986 | /* Renege 'needed' bytes from the ordering queue. */ | 
 | 987 | static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed) | 
 | 988 | { | 
 | 989 | 	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed); | 
 | 990 | } | 
 | 991 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 992 | /* Renege 'needed' bytes from the reassembly queue. */ | 
 | 993 | static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed) | 
 | 994 | { | 
| Pavel Emelyanov | 16d14ef | 2007-10-23 20:30:25 -0700 | [diff] [blame] | 995 | 	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 996 | } | 
 | 997 |  | 
/* Partial deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				struct sctp_chunk *chunk,
				gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
	struct sctp_sock *sp;

	asoc = ulpq->asoc;
	sp = sctp_sk(asoc->base.sk);

	/* If the association is already in Partial Delivery mode
	 * we have nothing to do.
	 */
	if (ulpq->pd_mode)
		return;

	/* If the user enabled the fragment interleave socket option,
	 * multiple associations can enter partial delivery.
	 * Otherwise, we can only enter partial delivery if the
	 * socket is not in partial delivery mode.
	 */
	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
		/* Is partial delivery possible?  */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP.   */
		if (event) {
			sctp_ulpq_tail_event(ulpq, event);
			sctp_ulpq_set_pd(ulpq);
			return;
		}
	}
}

/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc;
	__u16 needed, freed;

	asoc = ulpq->asoc;

	if (chunk) {
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(sctp_data_chunk_t);
	} else
		needed = SCTP_DEFAULT_MAXWINDOW;

	freed = 0;

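	/* Only renege when nothing is sitting in the socket's receive
	 * queue; reclaim bytes first from the ordering lobby and then,
	 * if still short, from the reassembly queue.
	 */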
	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed) {
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
		}
	}
	/* If able to free enough room, accept this chunk. */
	if (chunk && (freed >= needed)) {
		__u32 tsn;
		tsn = ntohl(chunk->subh.data_hdr->tsn);
		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
		sctp_ulpq_tail_data(ulpq, chunk, gfp);

		sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
	}

	sk_mem_reclaim(asoc->base.sk);
	return;
}



/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if (sctp_ulpq_clear_pd(ulpq) || ev)
		sk->sk_data_ready(sk, 0);
}