/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	Andrew Morton
 *		Initial version
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <trace/events/writeback.h>

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than dirtied pages to ensure that reasonably
 * large amounts of I/O are submitted.
 */
static inline long sync_writeback_pages(unsigned long dirtied)
{
	if (dirtied < ratelimit_pages)
		dirtied = ratelimit_pages;

	return dirtied + dirtied / 2;
}
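
/*
 * Worked example (illustrative numbers): with ratelimit_pages at its
 * initial value of 32 (writeback_set_ratelimit() retunes it at boot),
 * a task that has just dirtied 8 pages is rounded up to 32 and asked to
 * write 32 + 32/2 = 48 pages, while one that dirtied 100 pages is asked
 * to write 100 + 50 = 150, i.e. roughly 1.5x what was dirtied.
 */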

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via writeback threads) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
 * dirty_background_ratio * the amount of dirtyable memory
 */
unsigned long dirty_background_bytes;

/*
 * free highmem will not be subtracted from the total free memory
 * for calculating free ratios if vm_highmem_is_dirtyable is true
 */
int vm_highmem_is_dirtyable;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 20;

/*
 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
 * vm_dirty_ratio * the amount of dirtyable memory
 */
unsigned long vm_dirty_bytes;

/*
 * The interval between `kupdate'-style writebacks
 */
unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */

/*
 * The longest time for which data is allowed to remain dirty
 */
unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */


/*
 * Scale the writeback cache size proportional to the relative writeout speeds.
 *
 * We do this by keeping a floating proportion between BDIs, based on page
 * writeback completions [end_page_writeback()]. Those devices that write out
 * pages fastest will get the larger share, while the slower will get a smaller
 * share.
 *
 * We use page writeout completions because we are interested in getting rid of
 * dirty pages. Having them written out is the primary goal.
 *
 * We introduce a concept of time, a period over which we measure these events,
 * because demand can/will vary over time. The length of this period itself is
 * measured in page writeback completions.
 *
 */
static struct prop_descriptor vm_completions;
static struct prop_descriptor vm_dirties;

/*
 * couple the period to the dirty_ratio:
 *
 *   period/2 ~ roundup_pow_of_two(dirty limit)
 */
static int calc_period_shift(void)
{
	unsigned long dirty_total;

	if (vm_dirty_bytes)
		dirty_total = vm_dirty_bytes / PAGE_SIZE;
	else
		dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) /
				100;
	return 2 + ilog2(dirty_total - 1);
}
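
/*
 * Worked example (illustrative, assuming 4KB pages): with the default
 * vm_dirty_ratio of 20 and about 1GB of dirtyable memory (262144 pages),
 * dirty_total is 52428 pages and ilog2(52427) is 15, so the shift is 17.
 * The proportion period is then 2^17 completions, i.e. period/2 == 65536,
 * which is roundup_pow_of_two(52428) as stated above.
 */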

/*
 * update the period when the dirty threshold changes.
 */
static void update_completion_period(void)
{
	int shift = calc_period_shift();
	prop_change_shift(&vm_completions, shift);
	prop_change_shift(&vm_dirties, shift);
}

int dirty_background_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_bytes = 0;
	return ret;
}

int dirty_background_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_ratio = 0;
	return ret;
}

int dirty_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int old_ratio = vm_dirty_ratio;
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
		update_completion_period();
		vm_dirty_bytes = 0;
	}
	return ret;
}


int dirty_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	unsigned long old_bytes = vm_dirty_bytes;
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
		update_completion_period();
		vm_dirty_ratio = 0;
	}
	return ret;
}

/*
 * Increment the BDI's writeout completion count and the global writeout
 * completion count. Called from test_clear_page_writeback().
 */
static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
{
	__prop_inc_percpu_max(&vm_completions, &bdi->completions,
			      bdi->max_prop_frac);
}

void bdi_writeout_inc(struct backing_dev_info *bdi)
{
	unsigned long flags;

	local_irq_save(flags);
	__bdi_writeout_inc(bdi);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(bdi_writeout_inc);

void task_dirty_inc(struct task_struct *tsk)
{
	prop_inc_single(&vm_dirties, &tsk->dirties);
}

/*
 * Obtain an accurate fraction of the BDI's portion.
 */
static void bdi_writeout_fraction(struct backing_dev_info *bdi,
		long *numerator, long *denominator)
{
	if (bdi_cap_writeback_dirty(bdi)) {
		prop_fraction_percpu(&vm_completions, &bdi->completions,
				numerator, denominator);
	} else {
		*numerator = 0;
		*denominator = 1;
	}
}

static inline void task_dirties_fraction(struct task_struct *tsk,
		long *numerator, long *denominator)
{
	prop_fraction_single(&vm_dirties, &tsk->dirties,
				numerator, denominator);
}

/*
 * task_dirty_limit - scale down dirty throttling threshold for one task
 *
 * task specific dirty limit:
 *
 *   dirty -= (dirty/8) * p_{t}
 *
 * To protect light/slow dirtying tasks from heavier/fast ones, we start
 * throttling individual tasks before reaching the bdi dirty limit.
 * Relatively low thresholds will be allocated to heavy dirtiers. So when
 * dirty pages grow large, heavy dirtiers will be throttled first, which will
 * effectively curb the growth of dirty pages. Light dirtiers with high enough
 * dirty threshold may never get throttled.
 */
static unsigned long task_dirty_limit(struct task_struct *tsk,
				       unsigned long bdi_dirty)
{
	long numerator, denominator;
	unsigned long dirty = bdi_dirty;
	u64 inv = dirty >> 3;

	task_dirties_fraction(tsk, &numerator, &denominator);
	inv *= numerator;
	do_div(inv, denominator);

	dirty -= inv;

	return max(dirty, bdi_dirty/2);
}
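
/*
 * Worked example (illustrative numbers): a task responsible for half of
 * the recently dirtied pages (p_{t} = 1/2) against a bdi_dirty limit of
 * 800 pages gets inv = (800/8) * 1/2 = 50, i.e. a personal limit of 750
 * pages, while a task that dirtied almost nothing keeps close to the full
 * 800.  The max() is a safety clamp: no task's limit drops below half of
 * the bdi limit.
 */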

/*
 *
 */
static unsigned int bdi_min_ratio;

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	int ret = 0;

	spin_lock_bh(&bdi_lock);
	if (min_ratio > bdi->max_ratio) {
		ret = -EINVAL;
	} else {
		min_ratio -= bdi->min_ratio;
		if (bdi_min_ratio + min_ratio < 100) {
			bdi_min_ratio += min_ratio;
			bdi->min_ratio += min_ratio;
		} else {
			ret = -EINVAL;
		}
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}

int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
{
	int ret = 0;

	if (max_ratio > 100)
		return -EINVAL;

	spin_lock_bh(&bdi_lock);
	if (bdi->min_ratio > max_ratio) {
		ret = -EINVAL;
	} else {
		bdi->max_ratio = max_ratio;
		bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}
EXPORT_SYMBOL(bdi_set_max_ratio);

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around, to avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping, and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */

static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
	int node;
	unsigned long x = 0;

	for_each_node_state(node, N_HIGH_MEMORY) {
		struct zone *z =
			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];

		x += zone_page_state(z, NR_FREE_PAGES) +
		     zone_reclaimable_pages(z);
	}
	/*
	 * Make sure that the number of highmem pages is never larger
	 * than the total amount of dirtyable memory. This can only
	 * occur in very strange VM situations but we want to make sure
	 * that this does not occur.
	 */
	return min(x, total);
#else
	return 0;
#endif
}

/**
 * determine_dirtyable_memory - amount of memory that may be used
 *
 * Returns the number of pages that can currently be freed and used
 * by the kernel for direct mappings.
 */
unsigned long determine_dirtyable_memory(void)
{
	unsigned long x;

	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();

	if (!vm_highmem_is_dirtyable)
		x -= highmem_dirtyable_memory(x);

	return x + 1;	/* Ensure that we never return 0 */
}

/*
 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 *
 * Calculate the dirty thresholds based on sysctl parameters
 * - vm.dirty_background_ratio  or  vm.dirty_background_bytes
 * - vm.dirty_ratio             or  vm.dirty_bytes
 * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
 * real-time tasks.
 */
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
{
	unsigned long background;
	unsigned long dirty;
	unsigned long uninitialized_var(available_memory);
	struct task_struct *tsk;

	if (!vm_dirty_bytes || !dirty_background_bytes)
		available_memory = determine_dirtyable_memory();

	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
	else
		dirty = (vm_dirty_ratio * available_memory) / 100;

	if (dirty_background_bytes)
		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
	else
		background = (dirty_background_ratio * available_memory) / 100;

	if (background >= dirty)
		background = dirty / 2;
	tsk = current;
	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
		background += background / 4;
		dirty += dirty / 4;
	}
	*pbackground = background;
	*pdirty = dirty;
}
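
/*
 * Worked example (illustrative numbers): with 1,000,000 dirtyable pages
 * and the defaults vm_dirty_ratio = 20 and dirty_background_ratio = 10,
 * *pdirty becomes 200,000 pages and *pbackground 100,000 pages; for an
 * nfsd (PF_LESS_THROTTLE) or real-time caller both are lifted by 1/4,
 * to 250,000 and 125,000 pages respectively.
 */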

/*
 * bdi_dirty_limit - @bdi's share of dirty throttling threshold
 *
 * Allocate high/low dirty limits to fast/slow devices, in order to prevent
 * - starving fast devices
 * - piling up dirty pages (that will take long time to sync) on slow devices
 *
 * The bdi's share of dirty limit will be adapting to its throughput and
 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 */
unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
{
	u64 bdi_dirty;
	long numerator, denominator;

	/*
	 * Calculate this BDI's share of the dirty ratio.
	 */
	bdi_writeout_fraction(bdi, &numerator, &denominator);

	bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
	bdi_dirty *= numerator;
	do_div(bdi_dirty, denominator);

	bdi_dirty += (dirty * bdi->min_ratio) / 100;
	if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
		bdi_dirty = dirty * bdi->max_ratio / 100;

	return bdi_dirty;
}
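
/*
 * Worked example (illustrative numbers): with a global dirty limit of
 * 200,000 pages, bdi_min_ratio == 0 and a bdi left at its defaults
 * (min_ratio 0, max_ratio 100), a device that completed 1/4 of the
 * recent writeouts gets bdi_dirty = 200,000 * 1/4 = 50,000 pages; the
 * share of a device that stops completing writeback decays towards 0
 * as the proportion ages.
 */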

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then the writeback threads are woken to
 * perform some writeout.
 */
static void balance_dirty_pages(struct address_space *mapping,
				unsigned long write_chunk)
{
	long nr_reclaimable, bdi_nr_reclaimable;
	long nr_writeback, bdi_nr_writeback;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;
	unsigned long pages_written = 0;
	unsigned long pause = 1;
	bool dirty_exceeded = false;
	struct backing_dev_info *bdi = mapping->backing_dev_info;

	for (;;) {
		struct writeback_control wbc = {
			.sync_mode	= WB_SYNC_NONE,
			.older_than_this = NULL,
			.nr_to_write	= write_chunk,
			.range_cyclic	= 1,
		};

		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
					global_page_state(NR_UNSTABLE_NFS);
		nr_writeback = global_page_state(NR_WRITEBACK);

		global_dirty_limits(&background_thresh, &dirty_thresh);

		/*
		 * Throttle it only when the background writeback cannot
		 * catch up. This avoids (excessively) small writeouts
		 * when the bdi limits are ramping up.
		 */
		if (nr_reclaimable + nr_writeback <=
				(background_thresh + dirty_thresh) / 2)
			break;

		bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
		bdi_thresh = task_dirty_limit(current, bdi_thresh);

		/*
		 * In order to avoid the stacked BDI deadlock we need
		 * to ensure we accurately count the 'dirty' pages when
		 * the threshold is low.
		 *
		 * Otherwise it would be possible to get thresh+n pages
		 * reported dirty, even though there are thresh-m pages
		 * actually dirty; with m+n sitting in the percpu
		 * deltas.
		 */
		if (bdi_thresh < 2*bdi_stat_error(bdi)) {
			bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
			bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
		} else {
			bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
			bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
		}

		/*
		 * The bdi thresh is a somewhat "soft" limit derived from the
		 * global "hard" limit. The former helps to prevent a heavy IO
		 * bdi or process from holding back light ones; the latter is
		 * the last resort safeguard.
		 */
		dirty_exceeded =
			(bdi_nr_reclaimable + bdi_nr_writeback > bdi_thresh)
			|| (nr_reclaimable + nr_writeback > dirty_thresh);

		if (!dirty_exceeded)
			break;

		if (!bdi->dirty_exceeded)
			bdi->dirty_exceeded = 1;

		/* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
		 * Unstable writes are a feature of certain networked
		 * filesystems (i.e. NFS) in which data may have been
		 * written to the server's write cache, but has not yet
		 * been flushed to permanent storage.
		 * Only move pages to writeback if this bdi is over its
		 * threshold, otherwise wait until the disk writes catch
		 * up.
		 */
		trace_wbc_balance_dirty_start(&wbc, bdi);
		if (bdi_nr_reclaimable > bdi_thresh) {
			writeback_inodes_wb(&bdi->wb, &wbc);
			pages_written += write_chunk - wbc.nr_to_write;
			trace_wbc_balance_dirty_written(&wbc, bdi);
			if (pages_written >= write_chunk)
				break;		/* We've done our duty */
		}
		trace_wbc_balance_dirty_wait(&wbc, bdi);
		__set_current_state(TASK_UNINTERRUPTIBLE);
		io_schedule_timeout(pause);

		/*
		 * Increase the delay for each loop, up to our previous
		 * default of taking a 100ms nap.
		 */
		pause <<= 1;
		if (pause > HZ / 10)
			pause = HZ / 10;
	}

	if (!dirty_exceeded && bdi->dirty_exceeded)
		bdi->dirty_exceeded = 0;

	if (writeback_in_progress(bdi))
		return;

	/*
	 * In laptop mode, we wait until hitting the higher threshold before
	 * starting background writeout, and then write out all the way down
	 * to the lower threshold.  So slow writers cause minimal disk activity.
	 *
	 * In normal mode, we start background writeout at the lower
	 * background_thresh, to keep the amount of dirty memory low.
	 */
	if ((laptop_mode && pages_written) ||
	    (!laptop_mode && (nr_reclaimable > background_thresh)))
		bdi_start_background_writeback(bdi);
}
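
/*
 * Example of the back-off above (illustrative, assuming HZ == 1000): a
 * throttled task sleeps 1, 2, 4, ... jiffies on successive iterations,
 * capped at HZ/10, i.e. pauses of about 1ms doubling up to a 100ms nap,
 * until it has written its chunk or the thresholds are no longer
 * exceeded.
 */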

void set_page_dirty_balance(struct page *page, int page_mkwrite)
{
	if (set_page_dirty(page) || page_mkwrite) {
		struct address_space *mapping = page_mapping(page);

		if (mapping)
			balance_dirty_pages_ratelimited(mapping);
	}
}

static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;

/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
					unsigned long nr_pages_dirtied)
{
	unsigned long ratelimit;
	unsigned long *p;

	ratelimit = ratelimit_pages;
	if (mapping->backing_dev_info->dirty_exceeded)
		ratelimit = 8;

	/*
	 * Check the rate limiting. Also, we do not want to throttle real-time
	 * tasks in balance_dirty_pages(). Period.
	 */
	preempt_disable();
	p = &__get_cpu_var(bdp_ratelimits);
	*p += nr_pages_dirtied;
	if (unlikely(*p >= ratelimit)) {
		ratelimit = sync_writeback_pages(*p);
		*p = 0;
		preempt_enable();
		balance_dirty_pages(mapping, ratelimit);
		return;
	}
	preempt_enable();
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
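
/*
 * Example (illustrative): a task dirtying one page at a time, e.g. via
 * balance_dirty_pages_ratelimited(mapping) as in set_page_dirty_balance()
 * above, bumps this CPU's bdp_ratelimits counter once per call.  With
 * ratelimit_pages == 32 it enters balance_dirty_pages() only on every
 * 32nd page, with a write chunk of sync_writeback_pages(32) == 48 pages;
 * once the bdi reports dirty_exceeded the check runs every 8 pages.
 */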

void throttle_vm_writeout(gfp_t gfp_mask)
{
	unsigned long background_thresh;
	unsigned long dirty_thresh;

	for ( ; ; ) {
		global_dirty_limits(&background_thresh, &dirty_thresh);

		/*
		 * Boost the allowable dirty threshold a bit for page
		 * allocators so they don't get DoS'ed by heavy writers
		 */
		dirty_thresh += dirty_thresh / 10;      /* wheeee... */

		if (global_page_state(NR_UNSTABLE_NFS) +
			global_page_state(NR_WRITEBACK) <= dirty_thresh)
			break;
		congestion_wait(BLK_RW_ASYNC, HZ/10);

		/*
		 * The caller might hold locks which can prevent IO completion
		 * or progress in the filesystem.  So we cannot just sit here
		 * waiting for IO to complete.
		 */
		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
			break;
	}
}

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, buffer, length, ppos);
	bdi_arm_supers_timer();
	return 0;
}

#ifdef CONFIG_BLOCK
void laptop_mode_timer_fn(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;
	int nr_pages = global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS);

	/*
	 * We want to write everything out, not just down to the dirty
	 * threshold
	 */
	if (bdi_has_dirty_io(&q->backing_dev_info))
		bdi_start_writeback(&q->backing_dev_info, nr_pages);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(struct backing_dev_info *info)
{
	mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
}

/*
 * We're in laptop mode and we've just synced. The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
	struct backing_dev_info *bdi;

	rcu_read_lock();

	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
		del_timer(&bdi->laptop_mode_wb_timer);

	rcu_read_unlock();
}
#endif

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high, because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write back.
 * If this is too large then the caller will block on the IO queue all the
 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 * will write six megabyte chunks, max.
 */

void writeback_set_ratelimit(void)
{
	ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
	if (ratelimit_pages < 16)
		ratelimit_pages = 16;
	if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
		ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}
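
/*
 * Worked example (illustrative, assuming 4KB pages): a 4GB machine
 * (vm_total_pages ~= 1,048,576) with 8 online CPUs yields
 * 1,048,576 / (8 * 32) = 4096 pages, but the four-megabyte cap brings
 * ratelimit_pages down to (4096 * 1024) / PAGE_CACHE_SIZE = 1024 pages,
 * so each balance_dirty_pages() caller writes at most
 * sync_writeback_pages(1024) = 1536 pages, i.e. the six megabyte chunks
 * mentioned above.
 */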

static int __cpuinit
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
	writeback_set_ratelimit();
	return NOTIFY_DONE;
}

static struct notifier_block __cpuinitdata ratelimit_nb = {
	.notifier_call	= ratelimit_handler,
	.next		= NULL,
};

/*
 * Called early on to tune the page writeback dirty limits.
 *
 * We used to scale dirty pages according to how total memory
 * related to pages that could be allocated for buffers (by
 * comparing nr_free_buffer_pages() to vm_total_pages).
 *
 * However, that was when we used "dirty_ratio" to scale with
 * all memory, and we don't do that any more. "dirty_ratio"
 * is now applied to total non-HIGHPAGE memory (by subtracting
 * totalhigh_pages from vm_total_pages), and as such we can't
 * get into the old insane situation any more where we had
 * large amounts of dirty pages compared to a small amount of
 * non-HIGHMEM memory.
 *
 * But we might still want to scale the dirty_ratio by how
 * much memory the box has..
 */
void __init page_writeback_init(void)
{
	int shift;

	writeback_set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);

	shift = calc_period_shift();
	prop_descriptor_init(&vm_completions, shift);
	prop_descriptor_init(&vm_dirties, shift);
}

/**
 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
 * @mapping: address space structure to write
 * @start: starting page index
 * @end: ending page index (inclusive)
 *
 * This function scans the page range from @start to @end (inclusive) and tags
 * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
 * that write_cache_pages (or whoever calls this function) will then use
 * TOWRITE tag to identify pages eligible for writeback.  This mechanism is
 * used to avoid livelocking of writeback by a process steadily creating new
 * dirty pages in the file (thus it is important for this function to be quick
 * so that it can tag pages faster than a dirtying process can create them).
 */
/*
 * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency.
 */
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end)
{
#define WRITEBACK_TAG_BATCH 4096
	unsigned long tagged;

	do {
		spin_lock_irq(&mapping->tree_lock);
		tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
				&start, end, WRITEBACK_TAG_BATCH,
				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
		spin_unlock_irq(&mapping->tree_lock);
		WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
		cond_resched();
		/* We check 'start' to handle wrapping when end == ~0UL */
	} while (tagged >= WRITEBACK_TAG_BATCH && start);
}
EXPORT_SYMBOL(tag_pages_for_writeback);
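
/*
 * Example (illustrative): tagging a dirty range of 2^20 pages proceeds in
 * 4096-page batches, so mapping->tree_lock is taken and released 256
 * times with a cond_resched() in between, instead of being held across
 * the whole range.
 */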

/**
 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made gets new I/O started against it.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 *
 * To avoid livelocks (when another process dirties new pages), we first tag
 * pages which should be written back with TOWRITE tag and only then start
 * writing them. For data-integrity sync we have to be careful so that we do
 * not miss some pages (e.g., because some other process has cleared the
 * TOWRITE tag we set). The rule we follow is that the TOWRITE tag can be
 * cleared only by the process clearing the DIRTY tag (and submitting the page
 * for IO).
 */
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or
			 * even swizzled back from swapper_space to tmpfs file
			 * mapping. However, page->index will not change
			 * because we have a reference on the page.
			 */
			if (page->index > end) {
				/*
				 * can't be range_cyclic (1st pass) because
				 * end == -1 in that case.
				 */
				done = 1;
				break;
			}

			done_index = page->index;
| Nick Piggin | bd19e01 | 2009-01-06 14:39:06 -0800 | [diff] [blame] | 931 |  | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 932 | 			lock_page(page); | 
 | 933 |  | 
| Nick Piggin | 5a3d5c9 | 2009-01-06 14:39:09 -0800 | [diff] [blame] | 934 | 			/* | 
 | 935 | 			 * Page truncated or invalidated. We can freely skip it | 
 | 936 | 			 * then, even for data integrity operations: the page | 
 | 937 | 			 * has disappeared concurrently, so there could be no | 
 | 938 | 			 * real expectation of this data integrity operation | 
 | 939 | 			 * even if there is now a new, dirty page at the same | 
 | 940 | 			 * pagecache address. | 
 | 941 | 			 */ | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 942 | 			if (unlikely(page->mapping != mapping)) { | 
| Nick Piggin | 5a3d5c9 | 2009-01-06 14:39:09 -0800 | [diff] [blame] | 943 | continue_unlock: | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 944 | 				unlock_page(page); | 
 | 945 | 				continue; | 
 | 946 | 			} | 
 | 947 |  | 
| Nick Piggin | 515f4a0 | 2009-01-06 14:39:10 -0800 | [diff] [blame] | 948 | 			if (!PageDirty(page)) { | 
 | 949 | 				/* someone wrote it for us */ | 
 | 950 | 				goto continue_unlock; | 
 | 951 | 			} | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 952 |  | 
| Nick Piggin | 515f4a0 | 2009-01-06 14:39:10 -0800 | [diff] [blame] | 953 | 			if (PageWriteback(page)) { | 
 | 954 | 				if (wbc->sync_mode != WB_SYNC_NONE) | 
 | 955 | 					wait_on_page_writeback(page); | 
 | 956 | 				else | 
 | 957 | 					goto continue_unlock; | 
 | 958 | 			} | 
 | 959 |  | 
 | 960 | 			BUG_ON(PageWriteback(page)); | 
 | 961 | 			if (!clear_page_dirty_for_io(page)) | 
| Nick Piggin | 5a3d5c9 | 2009-01-06 14:39:09 -0800 | [diff] [blame] | 962 | 				goto continue_unlock; | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 963 |  | 
| Dave Chinner | 9e09438 | 2010-07-07 13:24:08 +1000 | [diff] [blame] | 964 | 			trace_wbc_writepage(wbc, mapping->backing_dev_info); | 
| Miklos Szeredi | 0ea9718 | 2007-05-10 22:22:51 -0700 | [diff] [blame] | 965 | 			ret = (*writepage)(page, wbc, data); | 
| Nick Piggin | 0026677 | 2009-01-06 14:39:06 -0800 | [diff] [blame] | 966 | 			if (unlikely(ret)) { | 
 | 967 | 				if (ret == AOP_WRITEPAGE_ACTIVATE) { | 
 | 968 | 					unlock_page(page); | 
 | 969 | 					ret = 0; | 
 | 970 | 				} else { | 
 | 971 | 					/* | 
 | 972 | 					 * done_index is set past this page, | 
 | 973 | 					 * so media errors will not choke | 
 | 974 | 					 * background writeout for the entire | 
 | 975 | 					 * file. This has consequences for | 
 | 976 | 					 * range_cyclic semantics (ie. it may | 
 | 977 | 					 * not be suitable for data integrity | 
 | 978 | 					 * writeout). | 
 | 979 | 					 */ | 
| Jun'ichi Nomura | cf15b07 | 2011-03-22 16:33:40 -0700 | [diff] [blame] | 980 | 					done_index = page->index + 1; | 
| Nick Piggin | 0026677 | 2009-01-06 14:39:06 -0800 | [diff] [blame] | 981 | 					done = 1; | 
 | 982 | 					break; | 
 | 983 | 				} | 
| Dave Chinner | 0b56492 | 2010-06-09 10:37:18 +1000 | [diff] [blame] | 984 | 			} | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 985 |  | 
| Dave Chinner | 546a192 | 2010-08-24 11:44:34 +1000 | [diff] [blame] | 986 | 			/* | 
 | 987 | 			 * We stop writing back only if we are not doing | 
 | 988 | 			 * integrity sync. In case of integrity sync we have to | 
 | 989 | 			 * keep going until we have written all the pages | 
 | 990 | 			 * we tagged for writeback prior to entering this loop. | 
 | 991 | 			 */ | 
 | 992 | 			if (--wbc->nr_to_write <= 0 && | 
 | 993 | 			    wbc->sync_mode == WB_SYNC_NONE) { | 
 | 994 | 				done = 1; | 
 | 995 | 				break; | 
| Nick Piggin | 05fe478 | 2009-01-06 14:39:08 -0800 | [diff] [blame] | 996 | 			} | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 997 | 		} | 
 | 998 | 		pagevec_release(&pvec); | 
 | 999 | 		cond_resched(); | 
 | 1000 | 	} | 
| Nick Piggin | 3a4c680 | 2009-02-12 04:34:23 +0100 | [diff] [blame] | 1001 | 	if (!cycled && !done) { | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1002 | 		/* | 
| Nick Piggin | 31a1266 | 2009-01-06 14:39:04 -0800 | [diff] [blame] | 1003 | 		 * range_cyclic: | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1004 | 		 * We hit the last page and there is more work to be done: wrap | 
 | 1005 | 		 * back to the start of the file | 
 | 1006 | 		 */ | 
| Nick Piggin | 31a1266 | 2009-01-06 14:39:04 -0800 | [diff] [blame] | 1007 | 		cycled = 1; | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1008 | 		index = 0; | 
| Nick Piggin | 31a1266 | 2009-01-06 14:39:04 -0800 | [diff] [blame] | 1009 | 		end = writeback_index - 1; | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1010 | 		goto retry; | 
 | 1011 | 	} | 
| Dave Chinner | 0b56492 | 2010-06-09 10:37:18 +1000 | [diff] [blame] | 1012 | 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) | 
 | 1013 | 		mapping->writeback_index = done_index; | 
| Aneesh Kumar K.V | 06d6cf6 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 1014 |  | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1015 | 	return ret; | 
 | 1016 | } | 
| Miklos Szeredi | 0ea9718 | 2007-05-10 22:22:51 -0700 | [diff] [blame] | 1017 | EXPORT_SYMBOL(write_cache_pages); | 
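/*
 * Illustrative sketch (not part of this file): the callback interface
 * above lets a filesystem's ->writepages() thread private per-call state
 * through the opaque @data pointer, much as __writepage() below threads
 * the mapping.  All "myfs_" names are invented for the example; a real
 * implementation would start its own I/O in the callback.
 */
struct myfs_wb_ctx {
	struct address_space *mapping;
	int pages_written;			/* example private state */
};

static int myfs_writepage_cb(struct page *page, struct writeback_control *wbc,
			     void *data)
{
	struct myfs_wb_ctx *ctx = data;
	int ret = ctx->mapping->a_ops->writepage(page, wbc);

	if (!ret)
		ctx->pages_written++;
	mapping_set_error(ctx->mapping, ret);
	return ret;
}

static int myfs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct myfs_wb_ctx ctx = { .mapping = mapping };

	return write_cache_pages(mapping, wbc, myfs_writepage_cb, &ctx);
}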
 | 1018 |  | 
 | 1019 | /* | 
 | 1020 |  * Function used by generic_writepages to call the real writepage | 
 | 1021 |  * function and set the mapping flags on error | 
 | 1022 |  */ | 
 | 1023 | static int __writepage(struct page *page, struct writeback_control *wbc, | 
 | 1024 | 		       void *data) | 
 | 1025 | { | 
 | 1026 | 	struct address_space *mapping = data; | 
 | 1027 | 	int ret = mapping->a_ops->writepage(page, wbc); | 
 | 1028 | 	mapping_set_error(mapping, ret); | 
 | 1029 | 	return ret; | 
 | 1030 | } | 
 | 1031 |  | 
 | 1032 | /** | 
 | 1033 |  * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them. | 
 | 1034 |  * @mapping: address space structure to write | 
 | 1035 |  * @wbc: subtract the number of written pages from *@wbc->nr_to_write | 
 | 1036 |  * | 
 | 1037 |  * This is a library function, which implements the writepages() | 
 | 1038 |  * address_space_operation. | 
 | 1039 |  */ | 
 | 1040 | int generic_writepages(struct address_space *mapping, | 
 | 1041 | 		       struct writeback_control *wbc) | 
 | 1042 | { | 
| Shaohua Li | 9b6096a | 2011-03-17 10:47:06 +0100 | [diff] [blame] | 1043 | 	struct blk_plug plug; | 
 | 1044 | 	int ret; | 
 | 1045 |  | 
| Miklos Szeredi | 0ea9718 | 2007-05-10 22:22:51 -0700 | [diff] [blame] | 1046 | 	/* deal with chardevs and other special files */ | 
 | 1047 | 	if (!mapping->a_ops->writepage) | 
 | 1048 | 		return 0; | 
 | 1049 |  | 
| Shaohua Li | 9b6096a | 2011-03-17 10:47:06 +0100 | [diff] [blame] | 1050 | 	blk_start_plug(&plug); | 
 | 1051 | 	ret = write_cache_pages(mapping, wbc, __writepage, mapping); | 
 | 1052 | 	blk_finish_plug(&plug); | 
 | 1053 | 	return ret; | 
| Miklos Szeredi | 0ea9718 | 2007-05-10 22:22:51 -0700 | [diff] [blame] | 1054 | } | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1055 |  | 
 | 1056 | EXPORT_SYMBOL(generic_writepages); | 
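/*
 * Illustrative sketch: a filesystem that provides ->writepage() can
 * either leave ->writepages NULL (do_writepages() below then falls back
 * to generic_writepages()) or wire it up explicitly.  "myfs_writepage"
 * and "myfs_aops" are invented names for the example.
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc);
					/* defined elsewhere in the filesystem */

static const struct address_space_operations myfs_aops = {
	.writepage	= myfs_writepage,
	.writepages	= generic_writepages,	/* optional; omitting it has the same effect */
};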
 | 1057 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1058 | int do_writepages(struct address_space *mapping, struct writeback_control *wbc) | 
 | 1059 | { | 
| Andrew Morton | 22905f7 | 2005-11-16 15:07:01 -0800 | [diff] [blame] | 1060 | 	int ret; | 
 | 1061 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1062 | 	if (wbc->nr_to_write <= 0) | 
 | 1063 | 		return 0; | 
 | 1064 | 	if (mapping->a_ops->writepages) | 
| Peter Zijlstra | d08b385 | 2006-09-25 23:30:57 -0700 | [diff] [blame] | 1065 | 		ret = mapping->a_ops->writepages(mapping, wbc); | 
| Andrew Morton | 22905f7 | 2005-11-16 15:07:01 -0800 | [diff] [blame] | 1066 | 	else | 
 | 1067 | 		ret = generic_writepages(mapping, wbc); | 
| Andrew Morton | 22905f7 | 2005-11-16 15:07:01 -0800 | [diff] [blame] | 1068 | 	return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1069 | } | 
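/*
 * Illustrative sketch: write back up to 1024 pages of one mapping
 * without waiting on I/O already in flight.  This is roughly the kind of
 * writeback_control a caller such as __filemap_fdatawrite_range() builds
 * before handing the mapping to do_writepages().
 */
static int example_flush_mapping(struct address_space *mapping)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.nr_to_write	= 1024,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};

	return do_writepages(mapping, &wbc);
}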
 | 1070 |  | 
 | 1071 | /** | 
 | 1072 |  * write_one_page - write out a single page and optionally wait on I/O | 
| Martin Waitz | 67be2dd | 2005-05-01 08:59:26 -0700 | [diff] [blame] | 1073 |  * @page: the page to write | 
 | 1074 |  * @wait: if true, wait on writeout | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1075 |  * | 
 | 1076 |  * The page must be locked by the caller and will be unlocked upon return. | 
 | 1077 |  * | 
 | 1078 |  * write_one_page() returns a negative error code if I/O failed. | 
 | 1079 |  */ | 
 | 1080 | int write_one_page(struct page *page, int wait) | 
 | 1081 | { | 
 | 1082 | 	struct address_space *mapping = page->mapping; | 
 | 1083 | 	int ret = 0; | 
 | 1084 | 	struct writeback_control wbc = { | 
 | 1085 | 		.sync_mode = WB_SYNC_ALL, | 
 | 1086 | 		.nr_to_write = 1, | 
 | 1087 | 	}; | 
 | 1088 |  | 
 | 1089 | 	BUG_ON(!PageLocked(page)); | 
 | 1090 |  | 
 | 1091 | 	if (wait) | 
 | 1092 | 		wait_on_page_writeback(page); | 
 | 1093 |  | 
 | 1094 | 	if (clear_page_dirty_for_io(page)) { | 
 | 1095 | 		page_cache_get(page); | 
 | 1096 | 		ret = mapping->a_ops->writepage(page, &wbc); | 
 | 1097 | 		if (ret == 0 && wait) { | 
 | 1098 | 			wait_on_page_writeback(page); | 
 | 1099 | 			if (PageError(page)) | 
 | 1100 | 				ret = -EIO; | 
 | 1101 | 		} | 
 | 1102 | 		page_cache_release(page); | 
 | 1103 | 	} else { | 
 | 1104 | 		unlock_page(page); | 
 | 1105 | 	} | 
 | 1106 | 	return ret; | 
 | 1107 | } | 
 | 1108 | EXPORT_SYMBOL(write_one_page); | 
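/*
 * Illustrative sketch: write a single page synchronously.  As documented
 * above, write_one_page() expects the page locked on entry and unlocks
 * it before returning; passing wait=1 also waits for the I/O and reports
 * -EIO on failure.
 */
static int example_write_page_sync(struct page *page)
{
	lock_page(page);
	return write_one_page(page, 1);
}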
 | 1109 |  | 
 | 1110 | /* | 
| Ken Chen | 7671932 | 2007-02-10 01:43:15 -0800 | [diff] [blame] | 1111 |  * For address_spaces which neither use buffers nor write back. | 
 | 1112 |  */ | 
 | 1113 | int __set_page_dirty_no_writeback(struct page *page) | 
 | 1114 | { | 
 | 1115 | 	if (!PageDirty(page)) | 
| Bob Liu | c3f0da6 | 2011-01-13 15:45:49 -0800 | [diff] [blame] | 1116 | 		return !TestSetPageDirty(page); | 
| Ken Chen | 7671932 | 2007-02-10 01:43:15 -0800 | [diff] [blame] | 1117 | 	return 0; | 
 | 1118 | } | 
 | 1119 |  | 
 | 1120 | /* | 
| Edward Shishkin | e3a7cca | 2009-03-31 15:19:39 -0700 | [diff] [blame] | 1121 |  * Helper function for set_page_dirty family. | 
 | 1122 |  * NOTE: This relies on being atomic wrt interrupts. | 
 | 1123 |  */ | 
 | 1124 | void account_page_dirtied(struct page *page, struct address_space *mapping) | 
 | 1125 | { | 
 | 1126 | 	if (mapping_cap_account_dirty(mapping)) { | 
 | 1127 | 		__inc_zone_page_state(page, NR_FILE_DIRTY); | 
| Michael Rubin | ea941f0 | 2010-10-26 14:21:35 -0700 | [diff] [blame] | 1128 | 		__inc_zone_page_state(page, NR_DIRTIED); | 
| Edward Shishkin | e3a7cca | 2009-03-31 15:19:39 -0700 | [diff] [blame] | 1129 | 		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); | 
 | 1130 | 		task_dirty_inc(current); | 
 | 1131 | 		task_io_account_write(PAGE_CACHE_SIZE); | 
 | 1132 | 	} | 
 | 1133 | } | 
| Michael Rubin | 679ceac | 2010-08-20 02:31:26 -0700 | [diff] [blame] | 1134 | EXPORT_SYMBOL(account_page_dirtied); | 
| Edward Shishkin | e3a7cca | 2009-03-31 15:19:39 -0700 | [diff] [blame] | 1135 |  | 
 | 1136 | /* | 
| Michael Rubin | f629d1c | 2010-10-26 14:21:33 -0700 | [diff] [blame] | 1137 |  * Helper function for set_page_writeback family. | 
 | 1138 |  * NOTE: Unlike account_page_dirtied this does not rely on being atomic | 
 | 1139 |  * wrt interrupts. | 
 | 1140 |  */ | 
 | 1141 | void account_page_writeback(struct page *page) | 
 | 1142 | { | 
 | 1143 | 	inc_zone_page_state(page, NR_WRITEBACK); | 
| Michael Rubin | ea941f0 | 2010-10-26 14:21:35 -0700 | [diff] [blame] | 1144 | 	inc_zone_page_state(page, NR_WRITTEN); | 
| Michael Rubin | f629d1c | 2010-10-26 14:21:33 -0700 | [diff] [blame] | 1145 | } | 
 | 1146 | EXPORT_SYMBOL(account_page_writeback); | 
 | 1147 |  | 
 | 1148 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1149 |  * For address_spaces which do not use buffers.  Just tag the page as dirty in | 
 | 1150 |  * its radix tree. | 
 | 1151 |  * | 
 | 1152 |  * This is also used when a single buffer is being dirtied: we want to set the | 
 | 1153 |  * page dirty in that case, but not all the buffers.  This is a "bottom-up" | 
 | 1154 |  * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying. | 
 | 1155 |  * | 
 | 1156 |  * Most callers have locked the page, which pins the address_space in memory. | 
 | 1157 |  * But zap_pte_range() does not lock the page, however in that case the | 
 | 1158 |  * mapping is pinned by the vma's ->vm_file reference. | 
 | 1159 |  * | 
 | 1160 |  * We take care to handle the case where the page was truncated from the | 
| Simon Arlott | 183ff22 | 2007-10-20 01:27:18 +0200 | [diff] [blame] | 1161 |  * mapping by re-checking page_mapping() inside tree_lock. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1162 |  */ | 
 | 1163 | int __set_page_dirty_nobuffers(struct page *page) | 
 | 1164 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1165 | 	if (!TestSetPageDirty(page)) { | 
 | 1166 | 		struct address_space *mapping = page_mapping(page); | 
 | 1167 | 		struct address_space *mapping2; | 
 | 1168 |  | 
| Andrew Morton | 8c08540 | 2006-12-10 02:19:24 -0800 | [diff] [blame] | 1169 | 		if (!mapping) | 
 | 1170 | 			return 1; | 
 | 1171 |  | 
| Nick Piggin | 19fd623 | 2008-07-25 19:45:32 -0700 | [diff] [blame] | 1172 | 		spin_lock_irq(&mapping->tree_lock); | 
| Andrew Morton | 8c08540 | 2006-12-10 02:19:24 -0800 | [diff] [blame] | 1173 | 		mapping2 = page_mapping(page); | 
 | 1174 | 		if (mapping2) { /* Race with truncate? */ | 
 | 1175 | 			BUG_ON(mapping2 != mapping); | 
| Nick Piggin | 787d221 | 2007-07-17 04:03:34 -0700 | [diff] [blame] | 1176 | 			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); | 
| Edward Shishkin | e3a7cca | 2009-03-31 15:19:39 -0700 | [diff] [blame] | 1177 | 			account_page_dirtied(page, mapping); | 
| Andrew Morton | 8c08540 | 2006-12-10 02:19:24 -0800 | [diff] [blame] | 1178 | 			radix_tree_tag_set(&mapping->page_tree, | 
 | 1179 | 				page_index(page), PAGECACHE_TAG_DIRTY); | 
 | 1180 | 		} | 
| Nick Piggin | 19fd623 | 2008-07-25 19:45:32 -0700 | [diff] [blame] | 1181 | 		spin_unlock_irq(&mapping->tree_lock); | 
| Andrew Morton | 8c08540 | 2006-12-10 02:19:24 -0800 | [diff] [blame] | 1182 | 		if (mapping->host) { | 
 | 1183 | 			/* !PageAnon && !swapper_space */ | 
 | 1184 | 			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1185 | 		} | 
| Andrew Morton | 4741c9f | 2006-03-24 03:18:11 -0800 | [diff] [blame] | 1186 | 		return 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1187 | 	} | 
| Andrew Morton | 4741c9f | 2006-03-24 03:18:11 -0800 | [diff] [blame] | 1188 | 	return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1189 | } | 
 | 1190 | EXPORT_SYMBOL(__set_page_dirty_nobuffers); | 
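/*
 * Illustrative sketch: an address_space that keeps no buffer_heads can
 * simply point ->set_page_dirty at the helper above.  "myfs_nobh_aops"
 * is an invented name for the example.
 */
static const struct address_space_operations myfs_nobh_aops = {
	.set_page_dirty	= __set_page_dirty_nobuffers,
};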
 | 1191 |  | 
 | 1192 | /* | 
 | 1193 |  * When a writepage implementation decides that it doesn't want to write this | 
 | 1194 |  * page for some reason, it should redirty the locked page via | 
 | 1195 |  * redirty_page_for_writepage(), then unlock the page and return 0. | 
 | 1196 |  */ | 
 | 1197 | int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page) | 
 | 1198 | { | 
 | 1199 | 	wbc->pages_skipped++; | 
 | 1200 | 	return __set_page_dirty_nobuffers(page); | 
 | 1201 | } | 
 | 1202 | EXPORT_SYMBOL(redirty_page_for_writepage); | 
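/*
 * Illustrative sketch of the rule described above: a ->writepage() that
 * chooses not to write the page right now (here, when called from page
 * reclaim, used purely as an example condition) redirties it, unlocks it
 * and returns 0 so the page stays dirty and is retried later.
 */
static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	if (wbc->for_reclaim) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}
	/* a real implementation would start I/O on the page here */
	unlock_page(page);
	return 0;
}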
 | 1203 |  | 
 | 1204 | /* | 
| Wu Fengguang | 6746aff | 2009-09-16 11:50:14 +0200 | [diff] [blame] | 1205 |  * Dirty a page. | 
 | 1206 |  * | 
 | 1207 |  * For pages with a mapping this should be done under the page lock | 
 | 1208 |  * for the benefit of asynchronous memory error handling, which prefers a | 
 | 1209 |  * consistent dirty state. This rule can be broken in some special cases, | 
 | 1210 |  * but it is better not to. | 
 | 1211 |  * | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1212 |  * If the mapping doesn't provide a set_page_dirty a_op, then | 
 | 1213 |  * just fall through and assume that it wants buffer_heads. | 
 | 1214 |  */ | 
| Nick Piggin | 1cf6e7d | 2009-02-18 14:48:18 -0800 | [diff] [blame] | 1215 | int set_page_dirty(struct page *page) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1216 | { | 
 | 1217 | 	struct address_space *mapping = page_mapping(page); | 
 | 1218 |  | 
 | 1219 | 	if (likely(mapping)) { | 
 | 1220 | 		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty; | 
| Minchan Kim | 278df9f | 2011-03-22 16:32:54 -0700 | [diff] [blame] | 1221 | 		/* | 
 | 1222 | 		 * A page left by readahead or lru_deactivate_page() can still have | 
 | 1223 | 		 * PG_readahead/PG_reclaim set due to a race with end_page_writeback(). | 
 | 1224 | 		 * For readahead this is harmless: if the page is written, the flag | 
 | 1225 | 		 * is reset, so there is no problem. | 
 | 1226 | 		 * For lru_deactivate_page() it is likewise harmless if the page is | 
 | 1227 | 		 * redirtied, since the flag is reset; but if the page is picked up by | 
 | 1228 | 		 * readahead, the stale flag confuses readahead and makes it restart | 
 | 1229 | 		 * the size ramp-up process. That is only a minor problem. | 
 | 1230 | 		 */ | 
 | 1231 | 		ClearPageReclaim(page); | 
| David Howells | 9361401 | 2006-09-30 20:45:40 +0200 | [diff] [blame] | 1232 | #ifdef CONFIG_BLOCK | 
 | 1233 | 		if (!spd) | 
 | 1234 | 			spd = __set_page_dirty_buffers; | 
 | 1235 | #endif | 
 | 1236 | 		return (*spd)(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1237 | 	} | 
| Andrew Morton | 4741c9f | 2006-03-24 03:18:11 -0800 | [diff] [blame] | 1238 | 	if (!PageDirty(page)) { | 
 | 1239 | 		if (!TestSetPageDirty(page)) | 
 | 1240 | 			return 1; | 
 | 1241 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1242 | 	return 0; | 
 | 1243 | } | 
 | 1244 | EXPORT_SYMBOL(set_page_dirty); | 
 | 1245 |  | 
 | 1246 | /* | 
 | 1247 |  * set_page_dirty() is racy if the caller has no reference against | 
 | 1248 |  * page->mapping->host, and if the page is unlocked.  This is because another | 
 | 1249 |  * CPU could truncate the page off the mapping and then free the mapping. | 
 | 1250 |  * | 
 | 1251 |  * Usually, the page _is_ locked, or the caller is a user-space process which | 
 | 1252 |  * holds a reference on the inode by having an open file. | 
 | 1253 |  * | 
 | 1254 |  * In other cases, the page should be locked before running set_page_dirty(). | 
 | 1255 |  */ | 
 | 1256 | int set_page_dirty_lock(struct page *page) | 
 | 1257 | { | 
 | 1258 | 	int ret; | 
 | 1259 |  | 
| Jens Axboe | 7eaceac | 2011-03-10 08:52:07 +0100 | [diff] [blame] | 1260 | 	lock_page(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1261 | 	ret = set_page_dirty(page); | 
 | 1262 | 	unlock_page(page); | 
 | 1263 | 	return ret; | 
 | 1264 | } | 
 | 1265 | EXPORT_SYMBOL(set_page_dirty_lock); | 
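/*
 * Illustrative sketch: pages pinned with get_user_pages() and filled by
 * a device are dirtied without the page lock held, so the locking
 * variant above is the safe way to mark them dirty before dropping the
 * pin.
 */
static void example_dirty_and_release(struct page **pages, int nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		set_page_dirty_lock(pages[i]);
		page_cache_release(pages[i]);
	}
}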
 | 1266 |  | 
 | 1267 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1268 |  * Clear a page's dirty flag, while caring for dirty memory accounting. | 
 | 1269 |  * Returns true if the page was previously dirty. | 
 | 1270 |  * | 
 | 1271 |  * This is for preparing to put the page under writeout.  We leave the page | 
 | 1272 |  * tagged as dirty in the radix tree so that a concurrent write-for-sync | 
 | 1273 |  * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage | 
 | 1274 |  * implementation will run either set_page_writeback() or set_page_dirty(), | 
 | 1275 |  * at which stage we bring the page's dirty flag and radix-tree dirty tag | 
 | 1276 |  * back into sync. | 
 | 1277 |  * | 
 | 1278 |  * This incoherency between the page's dirty flag and radix-tree tag is | 
 | 1279 |  * unfortunate, but it only exists while the page is locked. | 
 | 1280 |  */ | 
 | 1281 | int clear_page_dirty_for_io(struct page *page) | 
 | 1282 | { | 
 | 1283 | 	struct address_space *mapping = page_mapping(page); | 
 | 1284 |  | 
| Nick Piggin | 7935289 | 2007-07-19 01:47:22 -0700 | [diff] [blame] | 1285 | 	BUG_ON(!PageLocked(page)); | 
 | 1286 |  | 
| Linus Torvalds | 7658cc2 | 2006-12-29 10:00:58 -0800 | [diff] [blame] | 1287 | 	if (mapping && mapping_cap_account_dirty(mapping)) { | 
 | 1288 | 		/* | 
 | 1289 | 		 * Yes, Virginia, this is indeed insane. | 
 | 1290 | 		 * | 
 | 1291 | 		 * We use this sequence to make sure that | 
 | 1292 | 		 *  (a) we account for dirty stats properly | 
 | 1293 | 		 *  (b) we tell the low-level filesystem to | 
 | 1294 | 		 *      mark the whole page dirty if it was | 
 | 1295 | 		 *      dirty in a pagetable. Only to then | 
 | 1296 | 		 *  (c) clean the page again and return 1 to | 
 | 1297 | 		 *      cause the writeback. | 
 | 1298 | 		 * | 
 | 1299 | 		 * This way we avoid all nasty races with the | 
 | 1300 | 		 * dirty bit in multiple places and clearing | 
 | 1301 | 		 * them concurrently from different threads. | 
 | 1302 | 		 * | 
 | 1303 | 		 * Note! Normally the "set_page_dirty(page)" | 
 | 1304 | 		 * has no effect on the actual dirty bit - since | 
 | 1305 | 		 * that will already usually be set. But we | 
 | 1306 | 		 * need the side effects, and it can help us | 
 | 1307 | 		 * avoid races. | 
 | 1308 | 		 * | 
 | 1309 | 		 * We basically use the page "master dirty bit" | 
 | 1310 | 		 * as a serialization point for all the different | 
 | 1311 | 		 * threads doing their things. | 
| Linus Torvalds | 7658cc2 | 2006-12-29 10:00:58 -0800 | [diff] [blame] | 1312 | 		 */ | 
 | 1313 | 		if (page_mkclean(page)) | 
 | 1314 | 			set_page_dirty(page); | 
| Nick Piggin | 7935289 | 2007-07-19 01:47:22 -0700 | [diff] [blame] | 1315 | 		/* | 
 | 1316 | 		 * We carefully synchronise fault handlers against | 
 | 1317 | 		 * installing a dirty pte and marking the page dirty | 
 | 1318 | 		 * at this point. We do this by having them hold the | 
 | 1319 | 		 * page lock at some point after installing their | 
 | 1320 | 		 * pte, but before marking the page dirty. | 
 | 1321 | 		 * Pages are always locked coming in here, so we get | 
 | 1322 | 		 * the desired exclusion. See mm/memory.c:do_wp_page() | 
 | 1323 | 		 * for more comments. | 
 | 1324 | 		 */ | 
| Linus Torvalds | 7658cc2 | 2006-12-29 10:00:58 -0800 | [diff] [blame] | 1325 | 		if (TestClearPageDirty(page)) { | 
| Andrew Morton | 8c08540 | 2006-12-10 02:19:24 -0800 | [diff] [blame] | 1326 | 			dec_zone_page_state(page, NR_FILE_DIRTY); | 
| Peter Zijlstra | c9e51e4 | 2007-10-16 23:25:47 -0700 | [diff] [blame] | 1327 | 			dec_bdi_stat(mapping->backing_dev_info, | 
 | 1328 | 					BDI_RECLAIMABLE); | 
| Linus Torvalds | 7658cc2 | 2006-12-29 10:00:58 -0800 | [diff] [blame] | 1329 | 			return 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1330 | 		} | 
| Linus Torvalds | 7658cc2 | 2006-12-29 10:00:58 -0800 | [diff] [blame] | 1331 | 		return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1332 | 	} | 
| Linus Torvalds | 7658cc2 | 2006-12-29 10:00:58 -0800 | [diff] [blame] | 1333 | 	return TestClearPageDirty(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1334 | } | 
| Hans Reiser | 58bb01a | 2005-11-18 01:10:53 -0800 | [diff] [blame] | 1335 | EXPORT_SYMBOL(clear_page_dirty_for_io); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1336 |  | 
 | 1337 | int test_clear_page_writeback(struct page *page) | 
 | 1338 | { | 
 | 1339 | 	struct address_space *mapping = page_mapping(page); | 
 | 1340 | 	int ret; | 
 | 1341 |  | 
 | 1342 | 	if (mapping) { | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1343 | 		struct backing_dev_info *bdi = mapping->backing_dev_info; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1344 | 		unsigned long flags; | 
 | 1345 |  | 
| Nick Piggin | 19fd623 | 2008-07-25 19:45:32 -0700 | [diff] [blame] | 1346 | 		spin_lock_irqsave(&mapping->tree_lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1347 | 		ret = TestClearPageWriteback(page); | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1348 | 		if (ret) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1349 | 			radix_tree_tag_clear(&mapping->page_tree, | 
 | 1350 | 						page_index(page), | 
 | 1351 | 						PAGECACHE_TAG_WRITEBACK); | 
| Miklos Szeredi | e4ad08f | 2008-04-30 00:54:37 -0700 | [diff] [blame] | 1352 | 			if (bdi_cap_account_writeback(bdi)) { | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1353 | 				__dec_bdi_stat(bdi, BDI_WRITEBACK); | 
| Peter Zijlstra | 04fbfdc | 2007-10-16 23:25:50 -0700 | [diff] [blame] | 1354 | 				__bdi_writeout_inc(bdi); | 
 | 1355 | 			} | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1356 | 		} | 
| Nick Piggin | 19fd623 | 2008-07-25 19:45:32 -0700 | [diff] [blame] | 1357 | 		spin_unlock_irqrestore(&mapping->tree_lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1358 | 	} else { | 
 | 1359 | 		ret = TestClearPageWriteback(page); | 
 | 1360 | 	} | 
| Andrew Morton | d688abf | 2007-07-19 01:49:17 -0700 | [diff] [blame] | 1361 | 	if (ret) | 
 | 1362 | 		dec_zone_page_state(page, NR_WRITEBACK); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1363 | 	return ret; | 
 | 1364 | } | 
 | 1365 |  | 
 | 1366 | int test_set_page_writeback(struct page *page) | 
 | 1367 | { | 
 | 1368 | 	struct address_space *mapping = page_mapping(page); | 
 | 1369 | 	int ret; | 
 | 1370 |  | 
 | 1371 | 	if (mapping) { | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1372 | 		struct backing_dev_info *bdi = mapping->backing_dev_info; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1373 | 		unsigned long flags; | 
 | 1374 |  | 
| Nick Piggin | 19fd623 | 2008-07-25 19:45:32 -0700 | [diff] [blame] | 1375 | 		spin_lock_irqsave(&mapping->tree_lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1376 | 		ret = TestSetPageWriteback(page); | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1377 | 		if (!ret) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1378 | 			radix_tree_tag_set(&mapping->page_tree, | 
 | 1379 | 						page_index(page), | 
 | 1380 | 						PAGECACHE_TAG_WRITEBACK); | 
| Miklos Szeredi | e4ad08f | 2008-04-30 00:54:37 -0700 | [diff] [blame] | 1381 | 			if (bdi_cap_account_writeback(bdi)) | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1382 | 				__inc_bdi_stat(bdi, BDI_WRITEBACK); | 
 | 1383 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1384 | 		if (!PageDirty(page)) | 
 | 1385 | 			radix_tree_tag_clear(&mapping->page_tree, | 
 | 1386 | 						page_index(page), | 
 | 1387 | 						PAGECACHE_TAG_DIRTY); | 
| Jan Kara | f446daa | 2010-08-09 17:19:12 -0700 | [diff] [blame] | 1388 | 		radix_tree_tag_clear(&mapping->page_tree, | 
 | 1389 | 				     page_index(page), | 
 | 1390 | 				     PAGECACHE_TAG_TOWRITE); | 
| Nick Piggin | 19fd623 | 2008-07-25 19:45:32 -0700 | [diff] [blame] | 1391 | 		spin_unlock_irqrestore(&mapping->tree_lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1392 | 	} else { | 
 | 1393 | 		ret = TestSetPageWriteback(page); | 
 | 1394 | 	} | 
| Andrew Morton | d688abf | 2007-07-19 01:49:17 -0700 | [diff] [blame] | 1395 | 	if (!ret) | 
| Michael Rubin | f629d1c | 2010-10-26 14:21:33 -0700 | [diff] [blame] | 1396 | 		account_page_writeback(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1397 | 	return ret; | 
 | 1398 |  | 
 | 1399 | } | 
 | 1400 | EXPORT_SYMBOL(test_set_page_writeback); | 
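/*
 * Illustrative sketch of the protocol the two helpers above take part
 * in: a writeout path clears the dirty state, marks the page as under
 * writeback (set_page_writeback() resolves to test_set_page_writeback()),
 * starts the I/O, and the completion handler calls end_page_writeback(),
 * which clears the flag and the radix-tree tag via
 * test_clear_page_writeback().  The I/O itself is elided here; a real
 * implementation would submit a bio and end writeback from its
 * completion handler rather than inline.
 */
static int example_writeout_protocol(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (!clear_page_dirty_for_io(page)) {
		unlock_page(page);		/* someone else cleaned it */
		return 0;
	}

	set_page_writeback(page);
	unlock_page(page);
	/* ... submit the I/O for the page here ... */
	end_page_writeback(page);
	return 0;
}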
 | 1401 |  | 
 | 1402 | /* | 
| Nick Piggin | 0012818 | 2007-10-16 01:24:40 -0700 | [diff] [blame] | 1403 |  * Return true if any of the pages in the mapping are marked with the | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1404 |  * passed tag. | 
 | 1405 |  */ | 
 | 1406 | int mapping_tagged(struct address_space *mapping, int tag) | 
 | 1407 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1408 | 	int ret; | 
| Nick Piggin | 0012818 | 2007-10-16 01:24:40 -0700 | [diff] [blame] | 1409 | 	rcu_read_lock(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1410 | 	ret = radix_tree_tagged(&mapping->page_tree, tag); | 
| Nick Piggin | 0012818 | 2007-10-16 01:24:40 -0700 | [diff] [blame] | 1411 | 	rcu_read_unlock(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1412 | 	return ret; | 
 | 1413 | } | 
 | 1414 | EXPORT_SYMBOL(mapping_tagged); |