/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *	Released under the GPL version 2 or later.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *  Fixes:
 *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block somebody may
 *		create a new unix_socket when we are in the middle of sweep
 *		phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had slightly different problem here:
 *		extra fput() in situation when we passed the descriptor via
 *		such socket and closed it (descriptor). That would happen on
 *		each unix_gc() until the accept(). Since the struct file in
 *		question would go to the free list and might be reused...
 *		That might be the reason of random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues scanning.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */
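
/*
 * Rough shape of the collector implemented below (see unix_gc()):
 *
 *  1. Candidate selection: every in-flight AF_UNIX socket whose file
 *     has no references besides the in-flight ones is moved to
 *     gc_candidates.
 *  2. For each candidate, the in-flight counts of the candidates
 *     referenced from its receive queue are decremented.  A candidate
 *     that still has a non-zero count afterwards is reachable from
 *     outside the candidate set; it is taken off the candidate list
 *     and the counts of the candidates it references are restored.
 *  3. Whatever remains on gc_candidates is kept alive only by cycles;
 *     the skbs carrying those descriptors are unlinked into a hitlist
 *     and freed.
 */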

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

/* Internal data structures and random procedures: */

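/*
 * gc_inflight_list holds every unix_sock with a non-zero in-flight
 * count; gc_candidates holds the subset under consideration by a
 * running collection.  Both lists, and the unix_tot_inflight total of
 * AF_UNIX descriptors currently in flight, are updated under
 * unix_gc_lock.
 */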
static LIST_HEAD(gc_inflight_list);
static LIST_HEAD(gc_candidates);
static DEFINE_SPINLOCK(unix_gc_lock);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

unsigned int unix_tot_inflight;

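/*
 * Return the struct sock behind filp if it refers to a PF_UNIX socket,
 * otherwise NULL.
 */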
static struct sock *unix_get_socket(struct file *filp)
{
        struct sock *u_sock = NULL;
        struct inode *inode = filp->f_path.dentry->d_inode;

        /*
         *	Socket ?
         */
        if (S_ISSOCK(inode->i_mode)) {
                struct socket *sock = SOCKET_I(inode);
                struct sock *s = sock->sk;

                /*
                 *	PF_UNIX ?
                 */
                if (s && sock->ops && sock->ops->family == PF_UNIX)
                        u_sock = s;
        }
        return u_sock;
}

/*
 * Keep the in-flight count for a file descriptor up to date, if it
 * refers to an AF_UNIX socket.
 */

void unix_inflight(struct file *fp)
{
        struct sock *s = unix_get_socket(fp);
        if (s) {
                struct unix_sock *u = unix_sk(s);
                spin_lock(&unix_gc_lock);
                if (atomic_long_inc_return(&u->inflight) == 1) {
                        BUG_ON(!list_empty(&u->link));
                        list_add_tail(&u->link, &gc_inflight_list);
                } else {
                        BUG_ON(list_empty(&u->link));
                }
                unix_tot_inflight++;
                spin_unlock(&unix_gc_lock);
        }
}

void unix_notinflight(struct file *fp)
{
        struct sock *s = unix_get_socket(fp);
        if (s) {
                struct unix_sock *u = unix_sk(s);
                spin_lock(&unix_gc_lock);
                BUG_ON(list_empty(&u->link));
                if (atomic_long_dec_and_test(&u->inflight))
                        list_del_init(&u->link);
                unix_tot_inflight--;
                spin_unlock(&unix_gc_lock);
        }
}

static inline struct sk_buff *sock_queue_head(struct sock *sk)
{
        return (struct sk_buff *)&sk->sk_receive_queue;
}

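/*
 * Walk every skb on sk's receive queue; 'next' is kept one step ahead
 * so that the current skb may be unlinked during the walk.  Callers
 * hold sk_receive_queue.lock.
 */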
#define receive_queue_for_each_skb(sk, next, skb) \
        for (skb = sock_queue_head(sk)->next, next = skb->next; \
             skb != sock_queue_head(sk); skb = next, next = skb->next)

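/*
 * Scan the skbs queued on x's receive queue.  For every descriptor in
 * an attached SCM_RIGHTS set that refers to a GC candidate, call func
 * on the candidate; if hitlist is non-NULL, skbs referencing at least
 * one candidate are unlinked from the queue and moved to hitlist.
 */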
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
                          struct sk_buff_head *hitlist)
{
        struct sk_buff *skb;
        struct sk_buff *next;

        spin_lock(&x->sk_receive_queue.lock);
        receive_queue_for_each_skb(x, next, skb) {
                /*
                 *	Do we have file descriptors ?
                 */
                if (UNIXCB(skb).fp) {
                        bool hit = false;
                        /*
                         *	Process the descriptors of this socket
                         */
                        int nfd = UNIXCB(skb).fp->count;
                        struct file **fp = UNIXCB(skb).fp->fp;
                        while (nfd--) {
                                /*
                                 *	Get the socket the fd matches
                                 *	if it indeed does so
                                 */
                                struct sock *sk = unix_get_socket(*fp++);
                                if (sk) {
                                        struct unix_sock *u = unix_sk(sk);

                                        /*
                                         * Ignore non-candidates, they could
                                         * have been added to the queues after
                                         * starting the garbage collection
                                         */
                                        if (u->gc_candidate) {
                                                hit = true;
                                                func(u);
                                        }
                                }
                        }
                        if (hit && hitlist != NULL) {
                                __skb_unlink(skb, &x->sk_receive_queue);
                                __skb_queue_tail(hitlist, skb);
                        }
                }
        }
        spin_unlock(&x->sk_receive_queue.lock);
}

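/*
 * Apply scan_inflight() to x itself or, if x is a listening socket, to
 * each not-yet-accepted (embryo) socket sitting on its queue instead.
 */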
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
                          struct sk_buff_head *hitlist)
{
        if (x->sk_state != TCP_LISTEN)
                scan_inflight(x, func, hitlist);
        else {
                struct sk_buff *skb;
                struct sk_buff *next;
                struct unix_sock *u;
                LIST_HEAD(embryos);

                /*
                 * For a listening socket collect the queued embryos
                 * and perform a scan on them as well.
                 */
                spin_lock(&x->sk_receive_queue.lock);
                receive_queue_for_each_skb(x, next, skb) {
                        u = unix_sk(skb->sk);

                        /*
                         * An embryo cannot be in-flight, so it's safe
                         * to use the list link.
                         */
                        BUG_ON(!list_empty(&u->link));
                        list_add_tail(&u->link, &embryos);
                }
                spin_unlock(&x->sk_receive_queue.lock);

                while (!list_empty(&embryos)) {
                        u = list_entry(embryos.next, struct unix_sock, link);
                        scan_inflight(&u->sk, func, hitlist);
                        list_del_init(&u->link);
                }
        }
}

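/*
 * Callbacks handed to scan_children()/scan_inflight() above: they
 * adjust the in-flight count of each candidate found on a scanned
 * queue.
 */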
static void dec_inflight(struct unix_sock *usk)
{
        atomic_long_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
        atomic_long_inc(&usk->inflight);
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
        atomic_long_inc(&u->inflight);
        /*
         * If this still might be part of a cycle, move it to the end
         * of the list, so that it's checked even if it was already
         * passed over
         */
        if (u->gc_maybe_cycle)
                list_move_tail(&u->link, &gc_candidates);
}

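/*
 * gc_in_progress is set and cleared under unix_gc_lock;
 * wait_for_unix_gc() lets callers sleep until a collection that is
 * already running has finished.
 */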
static bool gc_in_progress = false;

void wait_for_unix_gc(void)
{
        wait_event(unix_gc_wait, gc_in_progress == false);
}

/* The external entry point: unix_gc() */
void unix_gc(void)
{
        struct unix_sock *u;
        struct unix_sock *next;
        struct sk_buff_head hitlist;
        struct list_head cursor;
        LIST_HEAD(not_cycle_list);

        spin_lock(&unix_gc_lock);

        /* Avoid a recursive GC. */
        if (gc_in_progress)
                goto out;

        gc_in_progress = true;
        /*
         * First, select candidates for garbage collection.  Only
         * in-flight sockets are considered, and from those only ones
         * which don't have any external reference.
         *
         * Holding unix_gc_lock will protect these candidates from
         * being detached, and hence from gaining an external
         * reference.  Since there are no possible receivers, all
         * buffers currently on the candidates' queues stay there
         * during the garbage collection.
         *
         * We also know that no new candidate can be added onto the
         * receive queues.  Other, non candidate sockets _can_ be
         * added to queue, so we must make sure only to touch
         * candidates.
         */
        list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
                long total_refs;
                long inflight_refs;

                total_refs = file_count(u->sk.sk_socket->file);
                inflight_refs = atomic_long_read(&u->inflight);

                BUG_ON(inflight_refs < 1);
                BUG_ON(total_refs < inflight_refs);
                if (total_refs == inflight_refs) {
                        list_move_tail(&u->link, &gc_candidates);
                        u->gc_candidate = 1;
                        u->gc_maybe_cycle = 1;
                }
        }

        /*
         * Now remove all internal in-flight references to children of
         * the candidates.
         */
        list_for_each_entry(u, &gc_candidates, link)
                scan_children(&u->sk, dec_inflight, NULL);

        /*
         * Restore the references for the children of every candidate
         * that still has remaining references.  Do this recursively,
         * so that in the end only those which form cyclic references
         * remain.
         *
         * Use a "cursor" link, to make the list traversal safe, even
         * though elements might be moved about.
         */
        list_add(&cursor, &gc_candidates);
        while (cursor.next != &gc_candidates) {
                u = list_entry(cursor.next, struct unix_sock, link);

                /* Move cursor to after the current position. */
                list_move(&cursor, &u->link);

                if (atomic_long_read(&u->inflight) > 0) {
                        list_move_tail(&u->link, &not_cycle_list);
                        u->gc_maybe_cycle = 0;
                        scan_children(&u->sk, inc_inflight_move_tail, NULL);
                }
        }
        list_del(&cursor);

        /*
         * not_cycle_list contains those sockets which do not make up a
         * cycle.  Restore these to the inflight list.
         */
        while (!list_empty(&not_cycle_list)) {
                u = list_entry(not_cycle_list.next, struct unix_sock, link);
                u->gc_candidate = 0;
                list_move_tail(&u->link, &gc_inflight_list);
        }

        /*
         * Now gc_candidates contains only garbage.  Restore original
         * inflight counters for these as well, and remove the skbuffs
         * which are creating the cycle(s).
         */
        skb_queue_head_init(&hitlist);
        list_for_each_entry(u, &gc_candidates, link)
                scan_children(&u->sk, inc_inflight, &hitlist);

        spin_unlock(&unix_gc_lock);

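        /*
         * The hitlist is purged with unix_gc_lock dropped: freeing
         * these skbs runs their destructors, which detach the
         * passed-along files and may take unix_gc_lock again via
         * unix_notinflight().
         */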
        /* Here we are. Hitlist is filled. Die. */
        __skb_queue_purge(&hitlist);

        spin_lock(&unix_gc_lock);

        /* All candidates should have been detached by now. */
        BUG_ON(!list_empty(&gc_candidates));
        gc_in_progress = false;
        wake_up(&unix_gc_wait);

 out:
        spin_unlock(&unix_gc_lock);
}