/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2002 International Business Machines, Corp.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions are the methods for accessing the SCTP inqueue.
 *
 * An SCTP inqueue is a queue into which you push SCTP packets
 * (which might be bundles or fragments of chunks) and out of which you
 * pop SCTP whole chunks.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <linux/interrupt.h>

/* Initialize an SCTP inqueue.  */
void sctp_inq_init(struct sctp_inq *queue)
{
	INIT_LIST_HEAD(&queue->in_chunk_list);
	queue->in_progress = NULL;

	/* Create a task for delivering data.  */
	INIT_WORK(&queue->immediate, NULL);

	queue->malloced = 0;
}

/* Release the memory associated with an SCTP inqueue.  */
void sctp_inq_free(struct sctp_inq *queue)
{
	struct sctp_chunk *chunk, *tmp;

	/* Empty the queue.  */
	list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}

	/* If there is a packet which is currently being worked on,
	 * free it as well.
	 */
	if (queue->in_progress) {
		sctp_chunk_free(queue->in_progress);
		queue->in_progress = NULL;
	}

	if (queue->malloced) {
		/* Dump the master memory segment.  */
		kfree(queue);
	}
}

/* Put a new packet in an SCTP inqueue.
 * We assume that packet->sctp_hdr is set and in host byte order.
 */
void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
{
	/* Directly call the packet handling routine. */
	if (chunk->rcvr->dead) {
		sctp_chunk_free(chunk);
		return;
	}

	/* We are now calling this either from the soft interrupt
	 * or from the backlog processing.
	 * Eventually, we should clean up inqueue to not rely
	 * on the BH related data structures.
	 */
	list_add_tail(&chunk->list, &q->in_chunk_list);
	q->immediate.func(&q->immediate);
}

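/* Illustrative sketch (not built): the receive path attaches a chunk to its
 * receiver (endpoint or association) and then pushes it, roughly as below.
 * The helper name my_deliver_chunk() is hypothetical; see the callers in
 * net/sctp/input.c for the real flow.
 */
#if 0
static void my_deliver_chunk(struct sctp_ep_common *rcvr,
			     struct sctp_chunk *chunk)
{
	chunk->rcvr = rcvr;
	/* sctp_inq_push() queues the chunk and runs the inqueue's handler
	 * inline, so delivery happens before this call returns.
	 */
	sctp_inq_push(&rcvr->inqueue, chunk);
}
#endif
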
/* Peek at the next chunk on the inqueue. */
struct sctp_chunkhdr *sctp_inq_peek(struct sctp_inq *queue)
{
	struct sctp_chunk *chunk;
	sctp_chunkhdr_t *ch = NULL;

	chunk = queue->in_progress;
	/* If no packet is in progress, or there are no more chunks
	 * in this packet, say so.
	 */
	if (chunk == NULL ||
	    chunk->singleton ||
	    chunk->end_of_packet ||
	    chunk->pdiscard)
		return NULL;

	ch = (sctp_chunkhdr_t *)chunk->chunk_end;

	return ch;
}

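/* Illustrative sketch (not built): a caller that wants to look at what
 * follows the chunk currently being processed, without consuming it, can
 * use sctp_inq_peek() along these lines.  The helper name
 * my_next_chunk_is() is hypothetical.
 */
#if 0
static int my_next_chunk_is(struct sctp_inq *q, __u8 type)
{
	sctp_chunkhdr_t *ch = sctp_inq_peek(q);

	/* NULL means the current packet has no further chunks. */
	return ch && ch->type == type;
}
#endif
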
/* Extract a chunk from an SCTP inqueue.
 *
 * WARNING:  If you need to put the chunk on another queue, you need to
 * make a shallow copy (clone) of it.
 */
struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
{
	struct sctp_chunk *chunk;
	sctp_chunkhdr_t *ch = NULL;

	/* The assumption is that we are safe to process the chunks
	 * at this time.
	 */

	if ((chunk = queue->in_progress)) {
		/* There is a packet that we have been working on.
		 * Any post processing work to do before we move on?
		 */
		if (chunk->singleton ||
		    chunk->end_of_packet ||
		    chunk->pdiscard) {
			sctp_chunk_free(chunk);
			chunk = queue->in_progress = NULL;
		} else {
			/* Nothing to do. Next chunk in the packet, please. */
			ch = (sctp_chunkhdr_t *) chunk->chunk_end;

			/* Force chunk->skb->data to chunk->chunk_end.  */
			skb_pull(chunk->skb,
				 chunk->chunk_end - chunk->skb->data);

			/* Verify that we have at least chunk headers
			 * worth of buffer left.
			 */
			if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) {
				sctp_chunk_free(chunk);
				chunk = queue->in_progress = NULL;
			}
		}
	}

	/* Do we need to take the next packet out of the queue to process? */
	if (!chunk) {
		struct list_head *entry;

		/* Is the queue empty?  */
		if (list_empty(&queue->in_chunk_list))
			return NULL;

		entry = queue->in_chunk_list.next;
		chunk = queue->in_progress =
			list_entry(entry, struct sctp_chunk, list);
		list_del_init(entry);

		/* This is the first chunk in the packet.  */
		chunk->singleton = 1;
		ch = (sctp_chunkhdr_t *) chunk->skb->data;
		chunk->data_accepted = 0;
	}

	chunk->chunk_hdr = ch;
	chunk->chunk_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
	/* In the unlikely case of an IP reassembly, the skb could be
	 * non-linear. If so, update chunk_end so that it doesn't go past
	 * the skb->tail.
	 */
	if (unlikely(skb_is_nonlinear(chunk->skb))) {
		if (chunk->chunk_end > skb_tail_pointer(chunk->skb))
			chunk->chunk_end = skb_tail_pointer(chunk->skb);
	}
	skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
	chunk->subh.v = NULL; /* Subheader is no longer valid.  */

	if (chunk->chunk_end < skb_tail_pointer(chunk->skb)) {
		/* This is not a singleton */
		chunk->singleton = 0;
	} else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
		/* RFC 2960, Section 6.10  Bundling
		 *
		 * Partial chunks MUST NOT be placed in an SCTP packet.
		 * If the receiver detects a partial chunk, it MUST drop
		 * the chunk.
		 *
		 * Since the end of the chunk is past the end of our buffer
		 * (which contains the whole packet), we can freely discard
		 * the whole packet.
		 */
		sctp_chunk_free(chunk);
		chunk = queue->in_progress = NULL;

		return NULL;
	} else {
		/* We are at the end of the packet, so mark the chunk
		 * in case we need to send a SACK.
		 */
		chunk->end_of_packet = 1;
	}

	SCTP_DEBUG_PRINTK("+++sctp_inq_pop+++ chunk %p[%s],"
			  " length %d, skb->len %d\n", chunk,
			  sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
			  ntohs(chunk->chunk_hdr->length), chunk->skb->len);
	return chunk;
}

/* Set a top-half handler.
 *
 * Originally, the top-half handler was scheduled as a BH.  We now
 * call the handler directly in sctp_inq_push() at a time that
 * we know we are lock safe.
 * The intent is that the installed handler will pull chunks out of the
 * inqueue and process them.
 */
void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
{
	INIT_WORK(&q->immediate, callback);
}

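/* Illustrative sketch (not built): one way a consumer might wire an inqueue
 * together.  The names my_inq_handler()/my_inq_setup() are hypothetical;
 * the in-tree users install sctp_endpoint_bh_rcv() and sctp_assoc_bh_rcv()
 * in much the same way.
 */
#if 0
static void my_inq_handler(struct work_struct *work)
{
	struct sctp_inq *q = container_of(work, struct sctp_inq, immediate);
	struct sctp_chunk *chunk;

	/* Drain whole chunks.  The inqueue still owns the chunk it hands
	 * back and frees it on a later pop, so it is not freed here.
	 */
	while ((chunk = sctp_inq_pop(q)) != NULL) {
		/* ... feed the chunk to the state machine ... */
	}
}

static void my_inq_setup(struct sctp_inq *q)
{
	sctp_inq_init(q);
	sctp_inq_set_th_handler(q, my_inq_handler);
}
#endif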