| Chris Mason | 925baed | 2008-06-25 16:01:30 -0400 | [diff] [blame] | 1 | /* | 
 | 2 |  * Copyright (C) 2008 Oracle.  All rights reserved. | 
 | 3 |  * | 
 | 4 |  * This program is free software; you can redistribute it and/or | 
 | 5 |  * modify it under the terms of the GNU General Public | 
 | 6 |  * License v2 as published by the Free Software Foundation. | 
 | 7 |  * | 
 | 8 |  * This program is distributed in the hope that it will be useful, | 
 | 9 |  * but WITHOUT ANY WARRANTY; without even the implied warranty of | 
 | 10 |  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU | 
 | 11 |  * General Public License for more details. | 
 | 12 |  * | 
 | 13 |  * You should have received a copy of the GNU General Public | 
 | 14 |  * License along with this program; if not, write to the | 
 | 15 |  * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | 
 | 16 |  * Boston, MA 021110-1307, USA. | 
 | 17 |  */ | 
 | 18 | #include <linux/sched.h> | 
| Chris Mason | 925baed | 2008-06-25 16:01:30 -0400 | [diff] [blame] | 19 | #include <linux/pagemap.h> | 
 | 20 | #include <linux/spinlock.h> | 
 | 21 | #include <linux/page-flags.h> | 
| Chris Mason | 4881ee5 | 2008-07-24 09:51:08 -0400 | [diff] [blame] | 22 | #include <asm/bug.h> | 
| Chris Mason | 925baed | 2008-06-25 16:01:30 -0400 | [diff] [blame] | 23 | #include "ctree.h" | 
 | 24 | #include "extent_io.h" | 
 | 25 | #include "locking.h" | 
 | 26 |  | 
| Chris Mason | bd68151 | 2011-07-16 15:23:14 -0400 | [diff] [blame] | 27 | void btrfs_assert_tree_read_locked(struct extent_buffer *eb); | 
 | 28 |  | 
 | 29 | /* | 
 | 30 |  * if we currently have a spinning reader or writer lock | 
 | 31 |  * (indicated by the rw flag) this will bump the count | 
 | 32 |  * of blocking holders and drop the spinlock. | 
 | 33 |  */ | 
 | 34 | void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw) | 
| Chris Mason | b4ce94d | 2009-02-04 09:25:08 -0500 | [diff] [blame] | 35 | { | 
| Arne Jansen | 5b25f70 | 2011-09-13 10:55:48 +0200 | [diff] [blame] | 36 | 	if (eb->lock_nested) { | 
 | 37 | 		read_lock(&eb->lock); | 
 | 38 | 		if (eb->lock_nested && current->pid == eb->lock_owner) { | 
 | 39 | 			read_unlock(&eb->lock); | 
 | 40 | 			return; | 
 | 41 | 		} | 
 | 42 | 		read_unlock(&eb->lock); | 
 | 43 | 	} | 
| Chris Mason | bd68151 | 2011-07-16 15:23:14 -0400 | [diff] [blame] | 44 | 	if (rw == BTRFS_WRITE_LOCK) { | 
 | 45 | 		if (atomic_read(&eb->blocking_writers) == 0) { | 
 | 46 | 			WARN_ON(atomic_read(&eb->spinning_writers) != 1); | 
 | 47 | 			atomic_dec(&eb->spinning_writers); | 
 | 48 | 			btrfs_assert_tree_locked(eb); | 
 | 49 | 			atomic_inc(&eb->blocking_writers); | 
 | 50 | 			write_unlock(&eb->lock); | 
 | 51 | 		} | 
 | 52 | 	} else if (rw == BTRFS_READ_LOCK) { | 
 | 53 | 		btrfs_assert_tree_read_locked(eb); | 
 | 54 | 		atomic_inc(&eb->blocking_readers); | 
 | 55 | 		WARN_ON(atomic_read(&eb->spinning_readers) == 0); | 
 | 56 | 		atomic_dec(&eb->spinning_readers); | 
 | 57 | 		read_unlock(&eb->lock); | 
 | 58 | 	} | 
 | 59 | 	return; | 
| Chris Mason | b4ce94d | 2009-02-04 09:25:08 -0500 | [diff] [blame] | 60 | } | 
| Chris Mason | d397712 | 2009-01-05 21:25:51 -0500 | [diff] [blame] | 61 |  | 
| Chris Mason | b4ce94d | 2009-02-04 09:25:08 -0500 | [diff] [blame] | 62 | /* | 
| Chris Mason | bd68151 | 2011-07-16 15:23:14 -0400 | [diff] [blame] | 63 |  * if we currently have a blocking lock, take the spinlock | 
 | 64 |  * and drop our blocking count | 
| Chris Mason | b4ce94d | 2009-02-04 09:25:08 -0500 | [diff] [blame] | 65 |  */ | 
| Chris Mason | bd68151 | 2011-07-16 15:23:14 -0400 | [diff] [blame] | 66 | void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw) | 
| Chris Mason | b4ce94d | 2009-02-04 09:25:08 -0500 | [diff] [blame] | 67 | { | 
| Arne Jansen | 5b25f70 | 2011-09-13 10:55:48 +0200 | [diff] [blame] | 68 | 	if (eb->lock_nested) { | 
 | 69 | 		read_lock(&eb->lock); | 
 | 70 | 		if (&eb->lock_nested && current->pid == eb->lock_owner) { | 
 | 71 | 			read_unlock(&eb->lock); | 
 | 72 | 			return; | 
 | 73 | 		} | 
 | 74 | 		read_unlock(&eb->lock); | 
 | 75 | 	} | 
| Chris Mason | bd68151 | 2011-07-16 15:23:14 -0400 | [diff] [blame] | 76 | 	if (rw == BTRFS_WRITE_LOCK_BLOCKING) { | 
 | 77 | 		BUG_ON(atomic_read(&eb->blocking_writers) != 1); | 
 | 78 | 		write_lock(&eb->lock); | 
 | 79 | 		WARN_ON(atomic_read(&eb->spinning_writers)); | 
 | 80 | 		atomic_inc(&eb->spinning_writers); | 
 | 81 | 		if (atomic_dec_and_test(&eb->blocking_writers)) | 
 | 82 | 			wake_up(&eb->write_lock_wq); | 
 | 83 | 	} else if (rw == BTRFS_READ_LOCK_BLOCKING) { | 
 | 84 | 		BUG_ON(atomic_read(&eb->blocking_readers) == 0); | 
 | 85 | 		read_lock(&eb->lock); | 
 | 86 | 		atomic_inc(&eb->spinning_readers); | 
 | 87 | 		if (atomic_dec_and_test(&eb->blocking_readers)) | 
 | 88 | 			wake_up(&eb->read_lock_wq); | 
| Chris Mason | b4ce94d | 2009-02-04 09:25:08 -0500 | [diff] [blame] | 89 | 	} | 
| Chris Mason | bd68151 | 2011-07-16 15:23:14 -0400 | [diff] [blame] | 90 | 	return; | 
| Chris Mason | b4ce94d | 2009-02-04 09:25:08 -0500 | [diff] [blame] | 91 | } | 
 | 92 |  | 
/*
 * take a spinning read lock.  This will wait for any blocking
 * writers
 *
 * On return the caller holds eb->lock for read with read_locks and
 * spinning_readers bumped -- unless the nested fast path fires, in
 * which case only eb->lock_nested records the hold.
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	read_lock(&eb->lock);
	/*
	 * NOTE(review): lock_owner is compared while only the read side of
	 * eb->lock is held; presumably a stale owner pid can never equal
	 * current->pid while blocking_writers != 0 -- verify.
	 */
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	read_unlock(&eb->lock);
	/* sleep until no writer is in (or converting to) blocking mode */
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		/* a writer went blocking between the wait and the lock: retry */
		read_unlock(&eb->lock);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}
 | 124 |  | 
/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	/* cheap unlocked pre-check to avoid touching the rwlock at all */
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	/* re-check under the lock: a writer may have gone blocking since */
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}
 | 143 |  | 
/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 *
 * (comment fix: this takes the WRITE lock, the original text said
 * "read lock")
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	/* cheap unlocked pre-check before taking the rwlock */
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;
	write_lock(&eb->lock);
	/* re-check under the lock: blocking holders drop eb->lock, so they
	 * are only visible through these counters */
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	atomic_inc(&eb->spinning_writers);
	/* record ownership so this thread may take nested read locks */
	eb->lock_owner = current->pid;
	return 1;
}
 | 164 |  | 
/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	/*
	 * A nested read lock held by the write-lock owner was never
	 * counted in read_locks/spinning_readers (see
	 * btrfs_tree_read_lock), so just clear the flag and return.
	 */
	if (eb->lock_nested) {
		read_lock(&eb->lock);
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			eb->lock_nested = 0;
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}
 | 185 |  | 
/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	/* nested read locks never go blocking; just clear the flag */
	if (eb->lock_nested) {
		read_lock(&eb->lock);
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			eb->lock_nested = 0;
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/* wake writers waiting in btrfs_tree_lock once the last blocking
	 * reader is gone */
	if (atomic_dec_and_test(&eb->blocking_readers))
		wake_up(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
	/* no read_unlock here: a blocking hold does not own eb->lock */
}
 | 206 |  | 
/*
 * take a spinning write lock.  This will wait for both
 * blocking readers or writers
 *
 * Always returns 0.  On return the caller holds eb->lock for write
 * with write_locks/spinning_writers bumped and lock_owner set.
 */
int btrfs_tree_lock(struct extent_buffer *eb)
{
again:
	/* opportunistic waits before taking the rwlock; rechecked below */
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	/* blocking holders don't own eb->lock, so they may still exist
	 * even after write_lock succeeds -- back off and wait again */
	if (atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		wait_event(eb->read_lock_wq,
			   atomic_read(&eb->blocking_readers) == 0);
		goto again;
	}
	if (atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	atomic_inc(&eb->write_locks);
	/* record ownership so this thread may take nested read locks */
	eb->lock_owner = current->pid;
	return 0;
}
 | 235 |  | 
/*
 * drop a spinning or a blocking write lock.
 *
 * Always returns 0.  Handles both hold modes: a blocking hold is
 * released by decrementing blocking_writers and waking waiters; a
 * spinning hold additionally drops eb->lock itself.
 */
int btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	/* at most one writer can ever be blocking */
	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	atomic_dec(&eb->write_locks);

	if (blockers) {
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_dec(&eb->blocking_writers);
		/* make the decrement visible before waking waiters */
		smp_wmb();
		wake_up(&eb->write_lock_wq);
	} else {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		write_unlock(&eb->lock);
	}
	return 0;
}
 | 260 |  | 
| Chris Mason | b9447ef | 2009-03-09 11:45:38 -0400 | [diff] [blame] | 261 | void btrfs_assert_tree_locked(struct extent_buffer *eb) | 
| Chris Mason | 925baed | 2008-06-25 16:01:30 -0400 | [diff] [blame] | 262 | { | 
| Chris Mason | bd68151 | 2011-07-16 15:23:14 -0400 | [diff] [blame] | 263 | 	BUG_ON(!atomic_read(&eb->write_locks)); | 
 | 264 | } | 
 | 265 |  | 
 | 266 | void btrfs_assert_tree_read_locked(struct extent_buffer *eb) | 
 | 267 | { | 
 | 268 | 	BUG_ON(!atomic_read(&eb->read_locks)); | 
| Chris Mason | 925baed | 2008-06-25 16:01:30 -0400 | [diff] [blame] | 269 | } |