/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *	Pauline Middelink	:	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *		Steve Whitehouse:	Default routines for sock_ops
 *		Arnaldo C. Melo :	removed net_pinfo, tp_pinfo and made
 *					protinfo be just a void pointer, as the
 *					protocol specific parts were moved to
 *					respective headers and ipv4/v6, etc now
 *					use private slabcaches for its socks
 *		Pedro Hortas	:	New flags field for socket options
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/config.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/security.h>

#include <linux/filter.h>

#include <asm/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>

/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
#define SOCK_DEBUG(sk, msg...) do { } while (0)
#endif
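
/*
 * Example (editor's sketch, not part of the original header): SOCK_DEBUG
 * takes printk-style arguments and compiles away entirely when
 * SOCK_DEBUGGING is undefined.  A protocol might trace per-socket state
 * like this (the traced value is hypothetical):
 *
 *	SOCK_DEBUG(sk, "rcv_nxt advanced to %u\n", rcv_nxt);
 *
 * The message is emitted only if the socket has the SOCK_DBG flag set,
 * i.e. SO_DEBUG was enabled on it.
 */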

/* This is the per-socket lock.  The spinlock provides synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
struct sock_iocb;
typedef struct {
	spinlock_t		slock;
	struct sock_iocb	*owner;
	wait_queue_head_t	wq;
} socket_lock_t;

#define sock_lock_init(__sk) \
do {	spin_lock_init(&((__sk)->sk_lock.slock)); \
	(__sk)->sk_lock.owner = NULL; \
	init_waitqueue_head(&((__sk)->sk_lock.wq)); \
} while(0)

struct sock;
struct proto;

/**
 *	struct sock_common - minimal network layer representation of sockets
 *	@skc_family: network address family
 *	@skc_state: Connection state
 *	@skc_reuse: %SO_REUSEADDR setting
 *	@skc_bound_dev_if: bound device index if != 0
 *	@skc_node: main hash linkage for various protocol lookup tables
 *	@skc_bind_node: bind hash linkage for various protocol lookup tables
 *	@skc_refcnt: reference count
 *	@skc_hash: hash value used with various protocol lookup tables
 *	@skc_prot: protocol handlers inside a network family
 *
 *	This is the minimal network layer representation of sockets, the header
 *	for struct sock and struct inet_timewait_sock.
 */
struct sock_common {
	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse;
	int			skc_bound_dev_if;
	struct hlist_node	skc_node;
	struct hlist_node	skc_bind_node;
	atomic_t		skc_refcnt;
	unsigned int		skc_hash;
	struct proto		*skc_prot;
};

/**
 *	struct sock - network layer representation of sockets
 *	@__sk_common: shared layout with inet_timewait_sock
 *	@sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
 *	@sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
 *	@sk_lock:	synchronizer
 *	@sk_rcvbuf: size of receive buffer in bytes
 *	@sk_sleep: sock wait queue
 *	@sk_dst_cache: destination cache
 *	@sk_dst_lock: destination cache lock
 *	@sk_policy: flow policy
 *	@sk_rmem_alloc: receive queue bytes committed
 *	@sk_receive_queue: incoming packets
 *	@sk_wmem_alloc: transmit queue bytes committed
 *	@sk_write_queue: Packet sending queue
 *	@sk_omem_alloc: "o" is "option" or "other"
 *	@sk_wmem_queued: persistent queue size
 *	@sk_forward_alloc: space allocated forward
 *	@sk_allocation: allocation mode
 *	@sk_sndbuf: size of send buffer in bytes
 *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings
 *	@sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
 *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
 *	@sk_lingertime: %SO_LINGER l_linger setting
 *	@sk_backlog: always used with the per-socket spinlock held
 *	@sk_callback_lock: used with the callbacks in the end of this struct
 *	@sk_error_queue: rarely used
 *	@sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt, IPV6_ADDRFORM for instance)
 *	@sk_err: last error
 *	@sk_err_soft: errors that don't cause failure but are the cause of a persistent failure, not just 'timed out'
 *	@sk_ack_backlog: current listen backlog
 *	@sk_max_ack_backlog: listen backlog set in listen()
 *	@sk_priority: %SO_PRIORITY setting
 *	@sk_type: socket type (%SOCK_STREAM, etc)
 *	@sk_protocol: which protocol this socket belongs in this network family
 *	@sk_peercred: %SO_PEERCRED setting
 *	@sk_rcvlowat: %SO_RCVLOWAT setting
 *	@sk_rcvtimeo: %SO_RCVTIMEO setting
 *	@sk_sndtimeo: %SO_SNDTIMEO setting
 *	@sk_filter: socket filtering instructions
 *	@sk_protinfo: private area, net family specific, when not using slab
 *	@sk_timer: sock cleanup timer
 *	@sk_stamp: time stamp of last packet received
 *	@sk_socket: Identd and reporting IO signals
 *	@sk_user_data: RPC layer private data
 *	@sk_sndmsg_page: cached page for sendmsg
 *	@sk_sndmsg_off: cached offset for sendmsg
 *	@sk_send_head: front of stuff to transmit
 *	@sk_security: used by security modules
 *	@sk_write_pending: a write to stream socket waits to start
 *	@sk_state_change: callback to indicate change in the state of the sock
 *	@sk_data_ready: callback to indicate there is data to be processed
 *	@sk_write_space: callback to indicate there is buffer sending space available
 *	@sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
 *	@sk_backlog_rcv: callback to process the backlog
 *	@sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
 */
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please just
	 * don't add anything before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_node			__sk_common.skc_node
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_hash			__sk_common.skc_hash
#define sk_prot			__sk_common.skc_prot
	unsigned char		sk_shutdown : 2,
				sk_no_check : 2,
				sk_userlocks : 4;
	unsigned char		sk_protocol;
	unsigned short		sk_type;
	int			sk_rcvbuf;
	socket_lock_t		sk_lock;
	wait_queue_head_t	*sk_sleep;
	struct dst_entry	*sk_dst_cache;
	struct xfrm_policy	*sk_policy[2];
	rwlock_t		sk_dst_lock;
	atomic_t		sk_rmem_alloc;
	atomic_t		sk_wmem_alloc;
	atomic_t		sk_omem_alloc;
	struct sk_buff_head	sk_receive_queue;
	struct sk_buff_head	sk_write_queue;
	int			sk_wmem_queued;
	int			sk_forward_alloc;
	gfp_t			sk_allocation;
	int			sk_sndbuf;
	int			sk_route_caps;
	unsigned long 		sk_flags;
	unsigned long	        sk_lingertime;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 */
	struct {
		struct sk_buff *head;
		struct sk_buff *tail;
	} sk_backlog;
	struct sk_buff_head	sk_error_queue;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	unsigned short		sk_ack_backlog;
	unsigned short		sk_max_ack_backlog;
	__u32			sk_priority;
	struct ucred		sk_peercred;
	int			sk_rcvlowat;
	long			sk_rcvtimeo;
	long			sk_sndtimeo;
	struct sk_filter      	*sk_filter;
	void			*sk_protinfo;
	struct timer_list	sk_timer;
	struct timeval		sk_stamp;
	struct socket		*sk_socket;
	void			*sk_user_data;
	struct page		*sk_sndmsg_page;
	struct sk_buff		*sk_send_head;
	__u32			sk_sndmsg_off;
	int			sk_write_pending;
	void			*sk_security;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk, int bytes);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void                    (*sk_destruct)(struct sock *sk);
};

/*
 * Hashed lists helper routines
 */
static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return sk->sk_node.next ?
		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

static inline int sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline int sk_hashed(const struct sock *sk)
{
	return sk->sk_node.pprev != NULL;
}

static __inline__ void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static __inline__ void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

static __inline__ int __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return 1;
	}
	return 0;
}

/* Grab socket reference count.  This operation is valid only
   when sk is ALREADY grabbed, e.g. it has been found in a hash table
   or a list and the lookup is made under a lock preventing hash table
   modifications.
 */

static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}

/* Ungrab socket in contexts which assume that the socket refcnt
   cannot hit zero, e.g. this is true in the context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}

static __inline__ int sk_del_node_init(struct sock *sk)
{
	int rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static __inline__ void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static __inline__ void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_node)
#define sk_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_from(__sk, node, sk_node)
#define sk_for_each_continue(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_continue(__sk, node, sk_node)
#define sk_for_each_safe(__sk, node, tmp, list) \
	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
#define sk_for_each_bound(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_bind_node)
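
/*
 * Example (editor's sketch, not part of the original header): a typical
 * lookup walks one hash chain under the lock that protects the table.
 * The table, lock and match predicate below are hypothetical:
 *
 *	struct sock *sk;
 *	struct hlist_node *node;
 *
 *	read_lock(&my_hash_lock);
 *	sk_for_each(sk, node, &my_hash_table[slot]) {
 *		if (sk->sk_hash == hash && my_match(sk, key)) {
 *			sock_hold(sk);
 *			break;
 *		}
 *	}
 *	read_unlock(&my_hash_lock);
 *
 * Taking the reference while still holding the lock is what makes
 * sock_hold() legal here (see the comment above sock_hold()).
 */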

/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_NO_LARGESEND, /* whether to send large segments or not */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
};

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline int sock_flag(struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}
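
/*
 * Example (editor's sketch, not part of the original header): the flag
 * helpers replace what used to be individual bitfields, e.g. a receive
 * path bailing out on an orphaned socket:
 *
 *	if (sock_flag(sk, SOCK_DEAD)) {
 *		kfree_skb(skb);
 *		return 0;
 *	}
 *
 * Note that the set/reset helpers use the non-atomic __set_bit() and
 * __clear_bit(), so writers are expected to be serialized, typically by
 * the socket lock.
 */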

static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline int sk_acceptq_is_full(struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(struct sock *sk)
{
	return sk->sk_wmem_queued / 2;
}

static inline int sk_stream_wspace(struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

extern void sk_stream_write_space(struct sock *sk);

static inline int sk_stream_memory_free(struct sock *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}

extern void sk_stream_rfree(struct sk_buff *skb);

static inline void sk_stream_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sk_stream_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk->sk_forward_alloc -= skb->truesize;
}

static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk->sk_wmem_queued   -= skb->truesize;
	sk->sk_forward_alloc += skb->truesize;
	__kfree_skb(skb);
}

/* The per-socket spinlock must be held here. */
#define sk_add_backlog(__sk, __skb)				\
do {	if (!(__sk)->sk_backlog.tail) {				\
		(__sk)->sk_backlog.head =			\
		(__sk)->sk_backlog.tail = (__skb);		\
	} else {						\
		((__sk)->sk_backlog.tail)->next = (__skb);	\
		(__sk)->sk_backlog.tail = (__skb);		\
	}							\
	(__skb)->next = NULL;					\
} while(0)
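
/*
 * Example (editor's sketch, not part of the original header): the usual
 * receive-path pattern.  While a user context owns the socket, BH code
 * must not touch its state, so the packet is parked on the backlog and
 * replayed through sk_backlog_rcv() when release_sock() runs:
 *
 *	bh_lock_sock(sk);
 *	if (!sock_owned_by_user(sk))
 *		ret = sk->sk_backlog_rcv(sk, skb);
 *	else
 *		sk_add_backlog(sk, skb);
 *	bh_unlock_sock(sk);
 */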

#define sk_wait_event(__sk, __timeo, __condition)		\
({	int rc;							\
	release_sock(__sk);					\
	rc = __condition;					\
	if (!rc) {						\
		*(__timeo) = schedule_timeout(*(__timeo));	\
		rc = __condition;				\
	}							\
	lock_sock(__sk);					\
	rc;							\
})
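
/*
 * Example (editor's sketch, not part of the original header): callers
 * set the task state and register on the socket's wait queue themselves;
 * sk_wait_event() only drops the lock, sleeps, and re-checks.  Roughly:
 *
 *	DEFINE_WAIT(wait);
 *
 *	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 *	sk_wait_event(sk, &timeo,
 *		      !skb_queue_empty(&sk->sk_receive_queue));
 *	finish_wait(sk->sk_sleep, &wait);
 *
 * This mirrors how sk_wait_data() (declared below) is structured in
 * net/core/sock.c.
 */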

extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);

extern int sk_wait_data(struct sock *sk, long *timeo);

struct request_sock_ops;

/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	void			(*close)(struct sock *sk,
					 long timeout);
	int			(*connect)(struct sock *sk,
					   struct sockaddr *uaddr,
					   int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept) (struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	int			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					      int optname, char __user *optval,
					      int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					      int optname, char __user *optval,
					      int __user *option);
	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg, size_t len);
	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg,
					   size_t len, int noblock, int flags,
					   int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					    int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	/* Memory pressure */
	void			(*enter_memory_pressure)(void);
	atomic_t		*memory_allocated;	/* Current allocated memory. */
	atomic_t		*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is used by multiple contexts non atomically.
	 * All of sk_stream_mem_schedule() is of this nature: accounting
	 * is strict, actions are advisory and have some latency.
	 */
	int			*memory_pressure;
	int			*sysctl_mem;
	int			*sysctl_wmem;
	int			*sysctl_rmem;
	int			max_header;

	kmem_cache_t		*slab;
	unsigned int		obj_size;

	kmem_cache_t		*twsk_slab;
	unsigned int		twsk_obj_size;
	atomic_t		*orphan_count;

	struct request_sock_ops	*rsk_prot;

	struct module		*owner;

	char			name[32];

	struct list_head	node;
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;
#endif
	struct {
		int inuse;
		u8  __pad[SMP_CACHE_BYTES - sizeof(int)];
	} stats[NR_CPUS];
};

extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);

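/*
 * Example (editor's sketch, not part of the original header): a protocol
 * typically defines a static struct proto and registers it from its
 * module init.  All names below are hypothetical:
 *
 *	static struct proto my_proto = {
 *		.name		= "MYPROTO",
 *		.owner		= THIS_MODULE,
 *		.close		= my_close,
 *		.sendmsg	= my_sendmsg,
 *		.recvmsg	= my_recvmsg,
 *		.hash		= my_hash,
 *		.unhash		= my_unhash,
 *		.obj_size	= sizeof(struct my_sock),
 *	};
 *
 *	rc = proto_register(&my_proto, 1);
 *
 * Passing alloc_slab == 1 makes proto_register() create a private slab
 * cache of obj_size bytes, from which sk_alloc() then carves this
 * protocol's sockets.
 */
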
#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (atomic_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
}
#else /* SOCK_REFCNT_DEBUG */
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */

/* Called with local bh disabled */
static __inline__ void sock_prot_inc_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse++;
}

static __inline__ void sock_prot_dec_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse--;
}

/* With per-bucket locks this operation is not atomic, but that makes
 * this version no worse than the previous one.
 */
static inline void __sk_prot_rehash(struct sock *sk)
{
	sk->sk_prot->unhash(sk);
	sk->sk_prot->hash(sk);
}

/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Sockets 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

/* sock_iocb: used to kick off async processing of socket ios */
struct sock_iocb {
	struct list_head	list;

	int			flags;
	int			size;
	struct socket		*sock;
	struct sock		*sk;
	struct scm_cookie	*scm;
	struct msghdr		*msg, async_msg;
	struct iovec		async_iov;
	struct kiocb		*kiocb;
};

static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
{
	return (struct sock_iocb *)iocb->private;
}

static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
{
	return si->kiocb;
}

struct socket_alloc {
	struct socket socket;
	struct inode vfs_inode;
};

static inline struct socket *SOCKET_I(struct inode *inode)
{
	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

static inline struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}

extern void __sk_stream_mem_reclaim(struct sock *sk);
extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind);

#define SK_STREAM_MEM_QUANTUM ((int)PAGE_SIZE)

static inline int sk_stream_pages(int amt)
{
	return (amt + SK_STREAM_MEM_QUANTUM - 1) / SK_STREAM_MEM_QUANTUM;
}

static inline void sk_stream_mem_reclaim(struct sock *sk)
{
	if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM)
		__sk_stream_mem_reclaim(sk);
}

static inline void sk_stream_writequeue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_stream_free_skb(sk, skb);
	sk_stream_mem_reclaim(sk);
}

static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
{
	return (int)skb->truesize <= sk->sk_forward_alloc ||
	       sk_stream_mem_schedule(sk, skb->truesize, 1);
}

static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
{
	return size <= sk->sk_forward_alloc ||
	       sk_stream_mem_schedule(sk, size, 0);
}

/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue.  This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also an exclusive sleep lock serializing
 * accesses from user process context.
 */
#define sock_owned_by_user(sk)	((sk)->sk_lock.owner)

extern void FASTCALL(lock_sock(struct sock *sk));
extern void FASTCALL(release_sock(struct sock *sk));

/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))
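
/*
 * Example (editor's sketch, not part of the original header): process
 * context takes the sleeping lock, softirq context the spinlock half:
 *
 *	lock_sock(sk);		(process context, may sleep; any backlog
 *				 accumulated meanwhile is replayed at
 *				 release_sock() time)
 *	... modify socket state ...
 *	release_sock(sk);
 *
 *	bh_lock_sock(sk);	(BH context, never sleeps)
 *	...
 *	bh_unlock_sock(sk);
 */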

extern struct sock		*sk_alloc(int family,
					  gfp_t priority,
					  struct proto *prot, int zero_it);
extern void			sk_free(struct sock *sk);
extern struct sock		*sk_clone(const struct sock *sk,
					  const gfp_t priority);

extern struct sk_buff		*sock_wmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern struct sk_buff		*sock_rmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern void			sock_wfree(struct sk_buff *skb);
extern void			sock_rfree(struct sk_buff *skb);

extern int			sock_setsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int optlen);

extern int			sock_getsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int __user *optlen);
extern struct sk_buff 		*sock_alloc_send_skb(struct sock *sk,
						     unsigned long size,
						     int noblock,
						     int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size,
			  gfp_t priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
extern void sk_send_sigurg(struct sock *sk);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
extern int                      sock_no_bind(struct socket *,
					     struct sockaddr *, int);
extern int                      sock_no_connect(struct socket *,
						struct sockaddr *, int, int);
extern int                      sock_no_socketpair(struct socket *,
						   struct socket *);
extern int                      sock_no_accept(struct socket *,
					       struct socket *, int);
extern int                      sock_no_getname(struct socket *,
						struct sockaddr *, int *, int);
extern unsigned int             sock_no_poll(struct file *, struct socket *,
					     struct poll_table_struct *);
extern int                      sock_no_ioctl(struct socket *, unsigned int,
					      unsigned long);
extern int			sock_no_listen(struct socket *, int);
extern int                      sock_no_shutdown(struct socket *, int);
extern int			sock_no_getsockopt(struct socket *, int , int,
						   char __user *, int __user *);
extern int			sock_no_setsockopt(struct socket *, int, int,
						   char __user *, int);
extern int                      sock_no_sendmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t);
extern int                      sock_no_recvmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t, int);
extern int			sock_no_mmap(struct file *file,
					     struct socket *sock,
					     struct vm_area_struct *vma);
extern ssize_t			sock_no_sendpage(struct socket *sock,
						 struct page *page,
						 int offset, size_t size,
						 int flags);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * uses the inet style.
 */
extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen);
extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size, int flags);
extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int optlen);

extern void sk_common_release(struct sock *sk);

/*
 *	Default socket callbacks and setup code
 */

/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);

/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@needlock: set to 1 if the sock is not locked by caller.
 *
 * Run the filter code and then cut skb->data to the correct size returned
 * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */

static inline int sk_filter(struct sock *sk, struct sk_buff *skb, int needlock)
{
	int err;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	if (sk->sk_filter) {
		struct sk_filter *filter;

		if (needlock)
			bh_lock_sock(sk);

		filter = sk->sk_filter;
		if (filter) {
			int pkt_len = sk_run_filter(skb, filter->insns,
						    filter->len);
			if (!pkt_len)
				err = -EPERM;
			else
				skb_trim(skb, pkt_len);
		}

		if (needlock)
			bh_unlock_sock(sk);
	}
	return err;
}

/**
 *	sk_filter_release: Release a socket filter
 *	@sk: socket
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */

static inline void sk_filter_release(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	atomic_sub(size, &sk->sk_omem_alloc);

	if (atomic_dec_and_test(&fp->refcnt))
		kfree(fp);
}

static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	atomic_inc(&fp->refcnt);
	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
}

/*
 * Socket reference counting postulates.
 *
 * * Each user of socket SHOULD hold a reference count.
 * * Each access point to socket (a hash table bucket, reference from a list,
 *   running timer, skb in flight) MUST hold a reference count.
 * * When reference count hits 0, it means it will never increase back.
 * * When reference count hits 0, it means that no references from
 *   outside exist to this socket and current process on current CPU
 *   is the last user and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ. When
 *   it is called, socket has no references from outside -> sk_free
 *   may release descendant resources allocated by the socket, but
 *   by the time it is called, socket is NOT referenced by any
 *   hash tables, lists etc.
 * * Packets, delivered from outside (from network or from another process)
 *   and enqueued on receive/error queues SHOULD NOT grab reference count,
 *   while they sit in queue. Otherwise, packets will leak when a socket
 *   is looked up by one CPU while unhashing is done by another CPU.
 *   It is true for udp/raw, netlink (leak to receive and error queues), tcp
 *   (leak to backlog). Packet socket does all the processing inside
 *   BR_NETPROTO_LOCK, so that it does not have this race condition. UNIX
 *   sockets use a separate SMP lock, so they are not subject to it either.
 */

/* Ungrab socket and destroy it, if it was the last reference. */
static inline void sock_put(struct sock *sk)
{
	if (atomic_dec_and_test(&sk->sk_refcnt))
		sk_free(sk);
}
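
/*
 * Example (editor's sketch, not part of the original header): per the
 * postulates above, a pending timer is an access point and must own a
 * reference, which the handler drops once it is done:
 *
 *	sock_hold(sk);
 *	mod_timer(&sk->sk_timer, jiffies + delay);
 *	...
 *	static void my_timer_handler(unsigned long data)
 *	{
 *		struct sock *sk = (struct sock *)data;
 *		...
 *		sock_put(sk);	(may be the final reference)
 *	}
 *
 * sk_reset_timer() and sk_stop_timer(), declared further down in this
 * header, encapsulate exactly this hold/put discipline.
 */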

/* Detach socket from process context.
 * Announce socket dead, detach it from wait queue and inode.
 * Note that the parent inode holds a reference count on this struct sock,
 * and we do not release it in this function, because the protocol
 * probably wants some additional cleanups or even to continue
 * working with this socket (TCP).
 */
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk->sk_socket = NULL;
	sk->sk_sleep  = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_sleep = &parent->wait;
	parent->sk = sk;
	sk->sk_socket = parent;
	write_unlock_bh(&sk->sk_callback_lock);
}

extern int sock_i_uid(struct sock *sk);
extern unsigned long sock_i_ino(struct sock *sk);

static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return sk->sk_dst_cache;
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	read_lock(&sk->sk_dst_lock);
	dst = sk->sk_dst_cache;
	if (dst)
		dst_hold(dst);
	read_unlock(&sk->sk_dst_lock);
	return dst;
}

static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = dst;
	dst_release(old_dst);
}

static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_set(sk, dst);
	write_unlock(&sk->sk_dst_lock);
}

static inline void
__sk_dst_reset(struct sock *sk)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = NULL;
	dst_release(old_dst);
}

static inline void
sk_dst_reset(struct sock *sk)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_reset(sk);
	write_unlock(&sk->sk_dst_lock);
}

static inline struct dst_entry *
__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk->sk_dst_cache;

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk->sk_dst_cache = NULL;
		dst_release(dst);
		return NULL;
	}

	return dst;
}

static inline struct dst_entry *
sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
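
/*
 * Example (editor's sketch, not part of the original header): a transmit
 * path revalidates the cached route before using it and re-routes on
 * failure.  The re-routing step is protocol specific and only hinted at
 * here:
 *
 *	struct dst_entry *dst = __sk_dst_check(sk, 0);
 *
 *	if (dst == NULL) {
 *		... perform a fresh route lookup into dst ...
 *		__sk_dst_set(sk, dst);
 *	}
 *
 * The __-prefixed variants assume the caller already serializes access
 * to sk->sk_dst_cache (e.g. holds the socket lock); the plain variants
 * take sk->sk_dst_lock themselves.
 */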

static inline void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_TSO) {
		if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
			sk->sk_route_caps &= ~NETIF_F_TSO;
	}
}

static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_wmem_queued   += skb->truesize;
	sk->sk_forward_alloc -= skb->truesize;
}

static inline int skb_copy_to_page(struct sock *sk, char __user *from,
				   struct sk_buff *skb, struct page *page,
				   int off, int copy)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		unsigned int csum = csum_and_copy_from_user(from,
						page_address(page) + off,
							    copy, 0, &err);
		if (err)
			return err;
		skb->csum = csum_block_add(skb->csum, csum, skb->len);
	} else if (copy_from_user(page_address(page) + off, from, copy))
		return -EFAULT;

	skb->len	     += copy;
	skb->data_len	     += copy;
	skb->truesize	     += copy;
	sk->sk_wmem_queued   += copy;
	sk->sk_forward_alloc -= copy;
	return 0;
}

/*
 * 	Queue a received datagram if it will fit. Stream and sequenced
 *	protocols can't normally use this as they need to fit buffers in
 *	and play with them.
 *
 * 	Inlined as it's very short and called for pretty much every
 *	packet ever received.
 */

static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = sock_wfree;
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
}

static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
}

extern void sk_reset_timer(struct sock *sk, struct timer_list* timer,
			   unsigned long expires);

extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);

static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err = 0;
	int skb_len;

	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf) {
		err = -ENOMEM;
		goto out;
	}

	/* It would deadlock if sock_queue_rcv_skb were used with
	   the socket lock held!  We assume that users of this
	   function are lock free.
	 */
	err = sk_filter(sk, skb, 1);
	if (err)
		goto out;

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue.  Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	skb_queue_tail(&sk->sk_receive_queue, skb);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
out:
	return err;
}

static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		return -ENOMEM;
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);
	return 0;
}

/*
 *	Recover an error report and clear atomically
 */

static inline int sock_error(struct sock *sk)
{
	int err = xchg(&sk->sk_err, 0);
	return -err;
}
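
/*
 * Example (editor's sketch, not part of the original header): error
 * delivery is "report once".  A sendmsg implementation typically starts
 * with:
 *
 *	err = sock_error(sk);
 *	if (err)
 *		goto out_err;	(err is already negative, e.g. -EPIPE)
 *
 * The xchg() both reads and clears sk_err, so concurrent consumers
 * cannot report the same error twice.
 */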

static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}

static inline void sk_wake_async(struct sock *sk, int how, int band)
{
	if (sk->sk_socket && sk->sk_socket->fasync_list)
		sock_wake_async(sk->sk_socket, how, band);
}

#define SOCK_MIN_SNDBUF 2048
#define SOCK_MIN_RCVBUF 256

static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
	}
}
|  | 1201 |  | 
|  | 1202 | static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk, | 
| Victor Fusco | 86a76ca | 2005-07-08 14:57:47 -0700 | [diff] [blame] | 1203 | int size, int mem, | 
| Al Viro | dd0fc66 | 2005-10-07 07:46:04 +0100 | [diff] [blame] | 1204 | gfp_t gfp) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1205 | { | 
| David S. Miller | c65f7f0 | 2005-07-05 15:17:25 -0700 | [diff] [blame] | 1206 | struct sk_buff *skb; | 
|  | 1207 | int hdr_len; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1208 |  | 
| David S. Miller | c65f7f0 | 2005-07-05 15:17:25 -0700 | [diff] [blame] | 1209 | hdr_len = SKB_DATA_ALIGN(sk->sk_prot->max_header); | 
| David S. Miller | d179cd1 | 2005-08-17 14:57:30 -0700 | [diff] [blame] | 1210 | skb = alloc_skb_fclone(size + hdr_len, gfp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1211 | if (skb) { | 
|  | 1212 | skb->truesize += mem; | 
| Herbert Xu | d80d99d | 2005-09-01 17:48:23 -0700 | [diff] [blame] | 1213 | if (sk_stream_wmem_schedule(sk, skb->truesize)) { | 
| David S. Miller | c65f7f0 | 2005-07-05 15:17:25 -0700 | [diff] [blame] | 1214 | skb_reserve(skb, hdr_len); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1215 | return skb; | 
|  | 1216 | } | 
|  | 1217 | __kfree_skb(skb); | 
|  | 1218 | } else { | 
|  | 1219 | sk->sk_prot->enter_memory_pressure(); | 
|  | 1220 | sk_stream_moderate_sndbuf(sk); | 
|  | 1221 | } | 
|  | 1222 | return NULL; | 
|  | 1223 | } | 
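|  |  |  | 
|  |  | /* Usage sketch (fragment): a stream sendmsg() path allocating a | 
|  |  | * fresh skb once the tail of the write queue has no room left. | 
|  |  | * "my_size" is a hypothetical payload estimate: | 
|  |  | * | 
|  |  | *	skb = sk_stream_alloc_pskb(sk, my_size, 0, sk->sk_allocation); | 
|  |  | *	if (!skb) | 
|  |  | *		goto wait_for_memory; | 
|  |  | */ | 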
|  | 1224 |  | 
|  | 1225 | static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk, | 
| Victor Fusco | 86a76ca | 2005-07-08 14:57:47 -0700 | [diff] [blame] | 1226 | int size, | 
| Al Viro | dd0fc66 | 2005-10-07 07:46:04 +0100 | [diff] [blame] | 1227 | gfp_t gfp) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1228 | { | 
|  | 1229 | return sk_stream_alloc_pskb(sk, size, 0, gfp); | 
|  | 1230 | } | 
|  | 1231 |  | 
|  | 1232 | static inline struct page *sk_stream_alloc_page(struct sock *sk) | 
|  | 1233 | { | 
|  | 1234 | struct page *page = NULL; | 
|  | 1235 |  | 
| Herbert Xu | ef01578 | 2005-09-01 17:48:59 -0700 | [diff] [blame] | 1236 | page = alloc_pages(sk->sk_allocation, 0); | 
|  | 1237 | if (!page) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1238 | sk->sk_prot->enter_memory_pressure(); | 
|  | 1239 | sk_stream_moderate_sndbuf(sk); | 
|  | 1240 | } | 
|  | 1241 | return page; | 
|  | 1242 | } | 
|  | 1243 |  | 
|  | 1244 | #define sk_stream_for_retrans_queue(skb, sk)				\ | 
|  | 1245 | for (skb = (sk)->sk_write_queue.next;			\ | 
|  | 1246 | (skb != (sk)->sk_send_head) &&			\ | 
|  | 1247 | (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\ | 
|  | 1248 | skb = skb->next) | 
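|  |  |  | 
|  |  | /* Usage sketch for the iterator above: visit every skb that has been | 
|  |  | * sent but not yet acked.  The loop stops at sk_send_head, i.e. | 
|  |  | * before any unsent data ("my_skb_fn" is hypothetical): | 
|  |  | * | 
|  |  | *	struct sk_buff *skb; | 
|  |  | * | 
|  |  | *	sk_stream_for_retrans_queue(skb, sk) | 
|  |  | *		my_skb_fn(sk, skb); | 
|  |  | */ | 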
|  | 1249 |  | 
|  | 1250 | /* | 
|  | 1251 | *	Default write policy as shown to user space via poll/select/SIGIO | 
|  | 1252 | */ | 
|  | 1253 | static inline int sock_writeable(const struct sock *sk) | 
|  | 1254 | { | 
|  | 1255 | return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2); | 
|  | 1256 | } | 
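|  |  |  | 
|  |  | /* Sketch: a poll() implementation can use this to report the write | 
|  |  | * bits (compare datagram_poll() in net/core/datagram.c): | 
|  |  | * | 
|  |  | *	if (sock_writeable(sk)) | 
|  |  | *		mask |= POLLOUT | POLLWRNORM | POLLWRBAND; | 
|  |  | */ | 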
|  | 1257 |  | 
| Al Viro | dd0fc66 | 2005-10-07 07:46:04 +0100 | [diff] [blame] | 1258 | static inline gfp_t gfp_any(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1259 | { | 
|  | 1260 | return in_softirq() ? GFP_ATOMIC : GFP_KERNEL; | 
|  | 1261 | } | 
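|  |  |  | 
|  |  | /* Sketch: code shared between process and softirq context can pick | 
|  |  | * the right allocation mode at run time: | 
|  |  | * | 
|  |  | *	skb = alloc_skb(size, gfp_any()); | 
|  |  | */ | 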
|  | 1262 |  | 
|  | 1263 | static inline long sock_rcvtimeo(const struct sock *sk, int noblock) | 
|  | 1264 | { | 
|  | 1265 | return noblock ? 0 : sk->sk_rcvtimeo; | 
|  | 1266 | } | 
|  | 1267 |  | 
|  | 1268 | static inline long sock_sndtimeo(const struct sock *sk, int noblock) | 
|  | 1269 | { | 
|  | 1270 | return noblock ? 0 : sk->sk_sndtimeo; | 
|  | 1271 | } | 
|  | 1272 |  | 
|  | 1273 | static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len) | 
|  | 1274 | { | 
|  | 1275 | return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1; | 
|  | 1276 | } | 
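|  |  |  | 
|  |  | /* Sketch: recvmsg() implementations use this to compute how many | 
|  |  | * bytes must arrive before returning, honouring MSG_WAITALL and | 
|  |  | * SO_RCVLOWAT (the result is always at least 1): | 
|  |  | * | 
|  |  | *	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); | 
|  |  | */ | 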
|  | 1277 |  | 
|  | 1278 | /* Alas, socket operations with a timeout are not restartable. | 
|  | 1279 | * Compare this to poll(). | 
|  | 1280 | */ | 
|  | 1281 | static inline int sock_intr_errno(long timeo) | 
|  | 1282 | { | 
|  | 1283 | return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR; | 
|  | 1284 | } | 
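|  |  |  | 
|  |  | /* Sketch: when a signal interrupts a socket wait, an untimed wait | 
|  |  | * (timeo == MAX_SCHEDULE_TIMEOUT) may be transparently restarted, | 
|  |  | * while a timed one must report -EINTR: | 
|  |  | * | 
|  |  | *	if (signal_pending(current)) | 
|  |  | *		return sock_intr_errno(timeo); | 
|  |  | */ | 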
|  | 1285 |  | 
|  | 1286 | static __inline__ void | 
|  | 1287 | sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) | 
|  | 1288 | { | 
| Patrick McHardy | a61bbcf | 2005-08-14 17:24:31 -0700 | [diff] [blame] | 1289 | struct timeval stamp; | 
|  | 1290 |  | 
|  | 1291 | skb_get_timestamp(skb, &stamp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1292 | if (sock_flag(sk, SOCK_RCVTSTAMP)) { | 
|  | 1293 | /* A race occurred between enabling timestamps and | 
|  | 1294 | receiving this packet; fill in the current time for now. */ | 
| Patrick McHardy | a61bbcf | 2005-08-14 17:24:31 -0700 | [diff] [blame] | 1295 | if (stamp.tv_sec == 0) | 
|  | 1296 | do_gettimeofday(&stamp); | 
|  | 1297 | skb_set_timestamp(skb, &stamp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1298 | put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(struct timeval), | 
| Patrick McHardy | a61bbcf | 2005-08-14 17:24:31 -0700 | [diff] [blame] | 1299 | &stamp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1300 | } else | 
| Patrick McHardy | a61bbcf | 2005-08-14 17:24:31 -0700 | [diff] [blame] | 1301 | sk->sk_stamp = stamp; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1302 | } | 
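|  |  |  | 
|  |  | /* Sketch: a datagram recvmsg() path calls this after copying the | 
|  |  | * payload so SO_TIMESTAMP users receive their control message: | 
|  |  | * | 
|  |  | *	sock_recv_timestamp(msg, sk, skb); | 
|  |  | */ | 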
|  | 1303 |  | 
|  | 1304 | /** | 
|  | 1305 | * sk_eat_skb - Release an skb if it is no longer needed | 
| Pavel Pisa | 4dc3b16 | 2005-05-01 08:59:25 -0700 | [diff] [blame] | 1306 | * @sk: socket to eat this skb from | 
|  | 1307 | * @skb: socket buffer to eat | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1308 | * | 
|  | 1309 | * This routine must be called with interrupts disabled or with the socket | 
|  | 1310 | * locked so that the sk_buff queue operation is safe. | 
|  | 1311 | */ | 
|  | 1312 | static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb) | 
|  | 1313 | { | 
|  | 1314 | __skb_unlink(skb, &sk->sk_receive_queue); | 
|  | 1315 | __kfree_skb(skb); | 
|  | 1316 | } | 
|  | 1317 |  | 
|  | 1318 | extern void sock_enable_timestamp(struct sock *sk); | 
|  | 1319 | extern int sock_get_timestamp(struct sock *, struct timeval __user *); | 
|  | 1320 |  | 
|  | 1321 | /* | 
|  | 1322 | *	Enable debug/info messages | 
|  | 1323 | */ | 
|  | 1324 |  | 
|  | 1325 | #if 0 | 
| Patrick McHardy | 64ce207 | 2005-08-09 20:50:53 -0700 | [diff] [blame] | 1326 | #define NETDEBUG(fmt, args...)	do { } while (0) | 
|  | 1327 | #define LIMIT_NETDEBUG(fmt, args...) do { } while (0) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1328 | #else | 
| Patrick McHardy | 64ce207 | 2005-08-09 20:50:53 -0700 | [diff] [blame] | 1329 | #define NETDEBUG(fmt, args...)	printk(fmt, ##args) | 
|  | 1330 | #define LIMIT_NETDEBUG(fmt, args...) do { if (net_ratelimit()) printk(fmt, ##args); } while (0) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1331 | #endif | 
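|  |  |  | 
|  |  | /* Usage sketch: a rate-limited diagnostic from a protocol handler, | 
|  |  | * so a flood of bad packets cannot spam the log: | 
|  |  | * | 
|  |  | *	LIMIT_NETDEBUG(KERN_DEBUG "myproto: short packet (%d)\n", len); | 
|  |  | */ | 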
|  | 1332 |  | 
|  | 1333 | /* | 
|  | 1334 | * Macros for sleeping on a socket. Use them like this: | 
|  | 1335 | * | 
|  | 1336 | * SOCK_SLEEP_PRE(sk) | 
|  | 1337 | * if (condition) | 
|  | 1338 | * 	schedule(); | 
|  | 1339 | * SOCK_SLEEP_POST(sk) | 
|  | 1340 | * | 
|  | 1341 | * N.B. These are now obsolete and were, as far as I know, only ever | 
|  | 1342 | * used in DECnet; once the last use of them in DECnet is gone, I | 
|  | 1343 | * intend to remove them. | 
|  | 1344 | */ | 
|  | 1345 |  | 
|  | 1346 | #define SOCK_SLEEP_PRE(sk) 	{ struct task_struct *tsk = current; \ | 
|  | 1347 | DECLARE_WAITQUEUE(wait, tsk); \ | 
|  | 1348 | tsk->state = TASK_INTERRUPTIBLE; \ | 
|  | 1349 | add_wait_queue((sk)->sk_sleep, &wait); \ | 
|  | 1350 | release_sock(sk); | 
|  | 1351 |  | 
|  | 1352 | #define SOCK_SLEEP_POST(sk)	tsk->state = TASK_RUNNING; \ | 
|  | 1353 | remove_wait_queue((sk)->sk_sleep, &wait); \ | 
|  | 1354 | lock_sock(sk); \ | 
|  | 1355 | } | 
|  | 1356 |  | 
|  | 1357 | static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool) | 
|  | 1358 | { | 
|  | 1359 | if (valbool) | 
|  | 1360 | sock_set_flag(sk, bit); | 
|  | 1361 | else | 
|  | 1362 | sock_reset_flag(sk, bit); | 
|  | 1363 | } | 
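|  |  |  | 
|  |  | /* Sketch (fragment): setsockopt()-style code maps an integer option | 
|  |  | * value onto a boolean socket flag with this helper (compare | 
|  |  | * sock_setsockopt() in net/core/sock.c): | 
|  |  | * | 
|  |  | *	case SO_KEEPALIVE: | 
|  |  | *		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); | 
|  |  | *		break; | 
|  |  | */ | 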
|  | 1364 |  | 
|  | 1365 | extern __u32 sysctl_wmem_max; | 
|  | 1366 | extern __u32 sysctl_rmem_max; | 
|  | 1367 |  | 
|  | 1368 | #ifdef CONFIG_NET | 
|  | 1369 | int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg); | 
|  | 1370 | #else | 
|  | 1371 | static inline int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg) | 
|  | 1372 | { | 
|  | 1373 | return -ENODEV; | 
|  | 1374 | } | 
|  | 1375 | #endif | 
|  | 1376 |  | 
| Arnaldo Carvalho de Melo | 2038073 | 2005-08-16 02:18:02 -0300 | [diff] [blame] | 1377 | extern void sk_init(void); | 
|  | 1378 |  | 
|  | 1379 | #ifdef CONFIG_SYSCTL | 
|  | 1380 | extern struct ctl_table core_table[]; | 
| Arnaldo Carvalho de Melo | 2038073 | 2005-08-16 02:18:02 -0300 | [diff] [blame] | 1381 | #endif | 
|  | 1382 |  | 
| David S. Miller | 6baf1f4 | 2005-09-05 18:14:11 -0700 | [diff] [blame] | 1383 | extern int sysctl_optmem_max; | 
|  | 1384 |  | 
| Arnaldo Carvalho de Melo | 2038073 | 2005-08-16 02:18:02 -0300 | [diff] [blame] | 1385 | extern __u32 sysctl_wmem_default; | 
|  | 1386 | extern __u32 sysctl_rmem_default; | 
| Arnaldo Carvalho de Melo | 2038073 | 2005-08-16 02:18:02 -0300 | [diff] [blame] | 1387 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1388 | #endif	/* _SOCK_H */ |