/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	Andrew Morton
 *		Initial version
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <trace/events/writeback.h>

/*
 * Sleep at most 200ms at a time in balance_dirty_pages().
 */
#define MAX_PAUSE		max(HZ/5, 1)

/*
 * Estimate write bandwidth at 200ms intervals.
 */
#define BANDWIDTH_INTERVAL	max(HZ/5, 1)

#define RATELIMIT_CALC_SHIFT	10

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via writeback threads) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
 * dirty_background_ratio * the amount of dirtyable memory
 */
unsigned long dirty_background_bytes;

/*
 * free highmem will not be subtracted from the total free memory
 * for calculating free ratios if vm_highmem_is_dirtyable is true
 */
int vm_highmem_is_dirtyable;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 20;

/*
 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
 * vm_dirty_ratio * the amount of dirtyable memory
 */
unsigned long vm_dirty_bytes;

/*
 * The interval between `kupdate'-style writebacks
 */
unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */

/*
 * The longest time for which data is allowed to remain dirty
 */
unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */

unsigned long global_dirty_limit;

/*
 * Scale the writeback cache size proportional to the relative writeout speeds.
 *
 * We do this by keeping a floating proportion between BDIs, based on page
 * writeback completions [end_page_writeback()]. Those devices that write out
 * pages fastest will get the larger share, while the slower will get a smaller
 * share.
 *
 * We use page writeout completions because we are interested in getting rid of
 * dirty pages. Having them written out is the primary goal.
 *
 * We introduce a concept of time, a period over which we measure these events,
 * because demand can/will vary over time. The length of this period itself is
 * measured in page writeback completions.
 */
static struct prop_descriptor vm_completions;

/*
 * couple the period to the dirty_ratio:
 *
 *   period/2 ~ roundup_pow_of_two(dirty limit)
 */
static int calc_period_shift(void)
{
	unsigned long dirty_total;

	if (vm_dirty_bytes)
		dirty_total = vm_dirty_bytes / PAGE_SIZE;
	else
		dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) /
				100;
	return 2 + ilog2(dirty_total - 1);
}
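
/*
 * Worked example (illustrative numbers, not from the original source):
 * with vm_dirty_bytes == 0, vm_dirty_ratio == 20 and 2^20 pages (4GB at
 * 4k pages) of dirtyable memory, dirty_total = 209715 pages, so
 * calc_period_shift() returns 2 + ilog2(209714) = 2 + 17 = 19.  The
 * floating proportion period is then 2^19 writeout completions, i.e.
 * period/2 = 2^18 = roundup_pow_of_two(dirty_total), as intended above.
 */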

/*
 * update the period when the dirty threshold changes.
 */
static void update_completion_period(void)
{
	int shift = calc_period_shift();
	prop_change_shift(&vm_completions, shift);

	writeback_set_ratelimit();
}

int dirty_background_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_bytes = 0;
	return ret;
}

int dirty_background_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_ratio = 0;
	return ret;
}

int dirty_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int old_ratio = vm_dirty_ratio;
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
		update_completion_period();
		vm_dirty_bytes = 0;
	}
	return ret;
}


int dirty_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	unsigned long old_bytes = vm_dirty_bytes;
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
		update_completion_period();
		vm_dirty_ratio = 0;
	}
	return ret;
}
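
/*
 * The four handlers above keep each ratio/bytes pair mutually exclusive.
 * Illustrative numbers (assuming 4k pages, not from the original source):
 * writing 1073741824 to /proc/sys/vm/dirty_bytes sets vm_dirty_bytes =
 * 2^30, clears vm_dirty_ratio to 0 and rescales the completion period;
 * the effective dirty limit becomes 2^30 / PAGE_SIZE = 262144 pages.
 */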

/*
 * Increment the BDI's writeout completion count and the global writeout
 * completion count. Called from test_clear_page_writeback().
 */
static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
{
	__inc_bdi_stat(bdi, BDI_WRITTEN);
	__prop_inc_percpu_max(&vm_completions, &bdi->completions,
			      bdi->max_prop_frac);
}

void bdi_writeout_inc(struct backing_dev_info *bdi)
{
	unsigned long flags;

	local_irq_save(flags);
	__bdi_writeout_inc(bdi);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(bdi_writeout_inc);

/*
 * Obtain an accurate fraction of the BDI's portion.
 */
static void bdi_writeout_fraction(struct backing_dev_info *bdi,
		long *numerator, long *denominator)
{
	prop_fraction_percpu(&vm_completions, &bdi->completions,
				numerator, denominator);
}

/*
 * bdi_min_ratio keeps the sum of the minimum dirty shares of all
 * registered backing devices, which, for obvious reasons, can not
 * exceed 100%.
 */
static unsigned int bdi_min_ratio;

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	int ret = 0;

	spin_lock_bh(&bdi_lock);
	if (min_ratio > bdi->max_ratio) {
		ret = -EINVAL;
	} else {
		min_ratio -= bdi->min_ratio;
		if (bdi_min_ratio + min_ratio < 100) {
			bdi_min_ratio += min_ratio;
			bdi->min_ratio += min_ratio;
		} else {
			ret = -EINVAL;
		}
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}

int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
{
	int ret = 0;

	if (max_ratio > 100)
		return -EINVAL;

	spin_lock_bh(&bdi_lock);
	if (bdi->min_ratio > max_ratio) {
		ret = -EINVAL;
	} else {
		bdi->max_ratio = max_ratio;
		bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}
EXPORT_SYMBOL(bdi_set_max_ratio);
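
/*
 * Illustrative use (hypothetical caller, not from the original source):
 * capping a slow USB stick at 10% of the global dirty limit, which is
 * also what writing 10 to /sys/class/bdi/<dev>/max_ratio does:
 *
 *	err = bdi_set_max_ratio(bdi, 10);
 *
 * On success, bdi->max_prop_frac = PROP_FRAC_BASE / 10 and
 * bdi_dirty_limit() below will never hand this bdi more than 10% of the
 * global dirty limit, regardless of its measured writeout share.
 */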

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around, to avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */

static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
	int node;
	unsigned long x = 0;

	for_each_node_state(node, N_HIGH_MEMORY) {
		struct zone *z =
			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];

		x += zone_page_state(z, NR_FREE_PAGES) +
		     zone_reclaimable_pages(z);
	}
	/*
	 * Make sure that the number of highmem pages is never larger
	 * than the number of the total dirtyable memory. This can only
	 * occur in very strange VM situations but we want to make sure
	 * that this does not occur.
	 */
	return min(x, total);
#else
	return 0;
#endif
}

/**
 * determine_dirtyable_memory - amount of memory that may be used
 *
 * Returns the number of pages that can currently be freed and used
 * by the kernel for direct mappings.
 */
unsigned long determine_dirtyable_memory(void)
{
	unsigned long x;

	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();

	if (!vm_highmem_is_dirtyable)
		x -= highmem_dirtyable_memory(x);

	return x + 1;	/* Ensure that we never return 0 */
}

static unsigned long dirty_freerun_ceiling(unsigned long thresh,
					   unsigned long bg_thresh)
{
	return (thresh + bg_thresh) / 2;
}

static unsigned long hard_dirty_limit(unsigned long thresh)
{
	return max(thresh, global_dirty_limit);
}

/*
 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 *
 * Calculate the dirty thresholds based on sysctl parameters
 * - vm.dirty_background_ratio  or  vm.dirty_background_bytes
 * - vm.dirty_ratio             or  vm.dirty_bytes
 * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (i.e. nfsd) and
 * real-time tasks.
 */
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
{
	unsigned long background;
	unsigned long dirty;
	unsigned long uninitialized_var(available_memory);
	struct task_struct *tsk;

	if (!vm_dirty_bytes || !dirty_background_bytes)
		available_memory = determine_dirtyable_memory();

	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
	else
		dirty = (vm_dirty_ratio * available_memory) / 100;

	if (dirty_background_bytes)
		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
	else
		background = (dirty_background_ratio * available_memory) / 100;

	if (background >= dirty)
		background = dirty / 2;
	tsk = current;
	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
		background += background / 4;
		dirty += dirty / 4;
	}
	*pbackground = background;
	*pdirty = dirty;
	trace_global_dirty_state(background, dirty);
}
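
/*
 * Worked example (illustrative numbers, not from the original source):
 * with available_memory = 2^20 pages (4GB at 4k pages) and the default
 * vm_dirty_ratio = 20 / dirty_background_ratio = 10, this yields
 * dirty = 209715 and background = 104857 pages.  For a PF_LESS_THROTTLE
 * or real-time task the same call returns background = 131071 and
 * dirty = 262143, i.e. both thresholds lifted by 1/4.
 */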

/**
 * bdi_dirty_limit - @bdi's share of the dirty throttling threshold
 * @bdi: the backing_dev_info to query
 * @dirty: global dirty limit in pages
 *
 * Returns @bdi's dirty limit in pages. The term "dirty" in the context of
 * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
 *
 * Note that balance_dirty_pages() will only seriously take it as a hard limit
 * when sleeping max_pause per page is not enough to keep the dirty pages under
 * control. For example, when the device is completely stalled due to some
 * error condition, or when there are 1000 dd tasks writing to a slow 10MB/s
 * USB key. In the other normal situations, it acts more gently by throttling
 * the tasks more (rather than completely blocking them) when the bdi dirty
 * pages go high.
 *
 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
 * - starving fast devices
 * - piling up dirty pages (that will take a long time to sync) on slow devices
 *
 * The bdi's share of the dirty limit will adapt to its throughput and is
 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 */
unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
{
	u64 bdi_dirty;
	long numerator, denominator;

	/*
	 * Calculate this BDI's share of the dirty ratio.
	 */
	bdi_writeout_fraction(bdi, &numerator, &denominator);

	bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
	bdi_dirty *= numerator;
	do_div(bdi_dirty, denominator);

	bdi_dirty += (dirty * bdi->min_ratio) / 100;
	if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
		bdi_dirty = dirty * bdi->max_ratio / 100;

	return bdi_dirty;
}
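
/*
 * Worked example (illustrative numbers, not from the original source):
 * with dirty = 200000 pages, bdi_min_ratio = 0 and a bdi that completed
 * 60% of the recent writeouts (numerator/denominator = 3/5), the raw
 * share is 200000 * 3/5 = 120000 pages.  With bdi->min_ratio = 0 and
 * bdi->max_ratio = 50 the result is capped at 200000 * 50/100 = 100000.
 */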

/*
 * Dirty position control.
 *
 * (o) global/bdi setpoints
 *
 * We want the dirty pages to be balanced around the global/bdi setpoints.
 * When the number of dirty pages is higher/lower than the setpoint, the
 * dirty position control ratio (and hence task dirty ratelimit) will be
 * decreased/increased to bring the dirty pages back to the setpoint.
 *
 *     pos_ratio = 1 << RATELIMIT_CALC_SHIFT
 *
 *     if (dirty < setpoint) scale up   pos_ratio
 *     if (dirty > setpoint) scale down pos_ratio
 *
 *     if (bdi_dirty < bdi_setpoint) scale up   pos_ratio
 *     if (bdi_dirty > bdi_setpoint) scale down pos_ratio
 *
 *     task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
 *
 * (o) global control line
 *
 *     ^ pos_ratio
 *     |
 *     |            |<===== global dirty control scope ======>|
 * 2.0 .............*
 *     |            .*
 *     |            . *
 *     |            .   *
 *     |            .     *
 *     |            .        *
 *     |            .            *
 * 1.0 ................................*
 *     |            .                  .     *
 *     |            .                  .          *
 *     |            .                  .              *
 *     |            .                  .                 *
 *     |            .                  .                    *
 *   0 +------------.------------------.----------------------*------------->
 *           freerun^          setpoint^                 limit^   dirty pages
 *
 * (o) bdi control line
 *
 *     ^ pos_ratio
 *     |
 *     |            *
 *     |              *
 *     |                *
 *     |                  *
 *     |                    * |<=========== span ============>|
 * 1.0 .......................*
 *     |                      . *
 *     |                      .   *
 *     |                      .     *
 *     |                      .       *
 *     |                      .         *
 *     |                      .           *
 *     |                      .             *
 *     |                      .               *
 *     |                      .                 *
 *     |                      .                   *
 *     |                      .                     *
 * 1/4 ...............................................* * * * * * * * * * * *
 *     |                      .                         .
 *     |                      .                           .
 *     |                      .                             .
 *   0 +----------------------.-------------------------------.------------->
 *                bdi_setpoint^                    x_intercept^
 *
 * The bdi control line won't drop below pos_ratio=1/4, so that bdi_dirty can
 * be smoothly throttled down to normal if it starts high in situations like
 * - start writing to a slow SD card and a fast disk at the same time. The SD
 *   card's bdi_dirty may rush to many times higher than bdi_setpoint.
 * - the bdi dirty thresh drops quickly due to a change of JBOD workload
 */
static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
					unsigned long thresh,
					unsigned long bg_thresh,
					unsigned long dirty,
					unsigned long bdi_thresh,
					unsigned long bdi_dirty)
{
	unsigned long write_bw = bdi->avg_write_bandwidth;
	unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh);
	unsigned long limit = hard_dirty_limit(thresh);
	unsigned long x_intercept;
	unsigned long setpoint;		/* dirty pages' target balance point */
	unsigned long bdi_setpoint;
	unsigned long span;
	long long pos_ratio;		/* for scaling up/down the rate limit */
	long x;

	if (unlikely(dirty >= limit))
		return 0;

	/*
	 * global setpoint
	 *
	 *                           setpoint - dirty 3
	 *        f(dirty) := 1.0 + (----------------)
	 *                           limit - setpoint
	 *
	 * it's a 3rd order polynomial that is subject to
	 *
	 * (1) f(freerun)  = 2.0 => rampup dirty_ratelimit reasonably fast
	 * (2) f(setpoint) = 1.0 => the balance point
	 * (3) f(limit)    = 0   => the hard limit
	 * (4) df/dx      <= 0	 => negative feedback control
	 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
	 *     => fast response on large errors; small oscillation near setpoint
	 */
	setpoint = (freerun + limit) / 2;
	x = div_s64((setpoint - dirty) << RATELIMIT_CALC_SHIFT,
		    limit - setpoint + 1);
	pos_ratio = x;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;

	/*
	 * We have computed the basic pos_ratio above based on the global
	 * situation. If the bdi is over/under its share of dirty pages, we
	 * want to scale pos_ratio further down/up. That is done by the
	 * following mechanism.
	 */

	/*
	 * bdi setpoint
	 *
	 *        f(bdi_dirty) := 1.0 + k * (bdi_dirty - bdi_setpoint)
	 *
	 *                        x_intercept - bdi_dirty
	 *                     := --------------------------
	 *                        x_intercept - bdi_setpoint
	 *
	 * The main bdi control line is a linear function that is subject to
	 *
	 * (1) f(bdi_setpoint) = 1.0
	 * (2) k = - 1 / (8 * write_bw)  (in single bdi case)
	 *     or equally: x_intercept = bdi_setpoint + 8 * write_bw
	 *
	 * For the single bdi case, the dirty pages are observed to fluctuate
	 * regularly within range
	 *        [bdi_setpoint - write_bw/2, bdi_setpoint + write_bw/2]
	 * for various filesystems, where (2) yields a reasonable 12.5%
	 * fluctuation range for pos_ratio.
	 *
	 * For the JBOD case, bdi_thresh (not bdi_dirty!) could fluctuate up to
	 * its own size, so move the slope over accordingly and choose a slope
	 * that yields 100% pos_ratio fluctuation on suddenly doubled
	 * bdi_thresh.
	 */
	if (unlikely(bdi_thresh > thresh))
		bdi_thresh = thresh;
	/*
	 * It's very possible that bdi_thresh is close to 0 not because the
	 * device is slow, but because it has remained inactive for a long
	 * time. Honour such devices with a reasonably good (hopefully IO
	 * efficient) threshold, so that occasional writes won't be blocked
	 * and active writes can ramp up the threshold quickly.
	 */
	bdi_thresh = max(bdi_thresh, (limit - dirty) / 8);
	/*
	 * scale the global setpoint to the bdi's:
	 *	bdi_setpoint = setpoint * bdi_thresh / thresh
	 */
	x = div_u64((u64)bdi_thresh << 16, thresh + 1);
	bdi_setpoint = setpoint * (u64)x >> 16;
	/*
	 * Use span=(8*write_bw) in the single bdi case as indicated by
	 * (thresh - bdi_thresh ~= 0) and transit to bdi_thresh in the JBOD
	 * case.
	 *
	 *        bdi_thresh                    thresh - bdi_thresh
	 * span = ---------- * (8 * write_bw) + ------------------- * bdi_thresh
	 *          thresh                            thresh
	 */
	span = (thresh - bdi_thresh + 8 * write_bw) * (u64)x >> 16;
	x_intercept = bdi_setpoint + span;

	if (bdi_dirty < x_intercept - span / 4) {
		pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty),
				    x_intercept - bdi_setpoint + 1);
	} else
		pos_ratio /= 4;

	/*
	 * bdi reserve area, safeguard against dirty pool underrun and disk
	 * idle. It may push the desired control point of the global dirty
	 * pages higher than the setpoint.
	 */
	x_intercept = bdi_thresh / 2;
	if (bdi_dirty < x_intercept) {
		if (bdi_dirty > x_intercept / 8)
			pos_ratio = div_u64(pos_ratio * x_intercept, bdi_dirty);
		else
			pos_ratio *= 8;
	}

	return pos_ratio;
}
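
/*
 * Worked example of the global control line (illustrative numbers, not
 * from the original source): with freerun = 150000 and limit = 250000,
 * setpoint = 200000.  At dirty = 180000:
 *
 *	x = (20000 << 10) / 50001 = 409			(~0.4)
 *	pos_ratio ~= 1024 + (409^3 >> 20) = 1089	(~1.06)
 *
 * so a task 20000 pages below the setpoint dirties about 6% faster than
 * the base ratelimit.  At dirty == setpoint, x = 0 and pos_ratio is
 * exactly 1024 (1.0); as dirty approaches limit, pos_ratio drops to ~0.
 */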

static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
				       unsigned long elapsed,
				       unsigned long written)
{
	const unsigned long period = roundup_pow_of_two(3 * HZ);
	unsigned long avg = bdi->avg_write_bandwidth;
	unsigned long old = bdi->write_bandwidth;
	u64 bw;

	/*
	 * bw = written * HZ / elapsed
	 *
	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
	 * write_bandwidth = ---------------------------------------------------
	 *                                          period
	 */
	bw = written - bdi->written_stamp;
	bw *= HZ;
	if (unlikely(elapsed > period)) {
		do_div(bw, elapsed);
		avg = bw;
		goto out;
	}
	bw += (u64)bdi->write_bandwidth * (period - elapsed);
	bw >>= ilog2(period);

	/*
	 * one more level of smoothing, for filtering out sudden spikes
	 */
	if (avg > old && old >= (unsigned long)bw)
		avg -= (avg - old) >> 3;

	if (avg < old && old <= (unsigned long)bw)
		avg += (old - avg) >> 3;

out:
	bdi->write_bandwidth = bw;
	bdi->avg_write_bandwidth = avg;
}
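
/*
 * Worked example (illustrative numbers, assuming HZ = 1000, not from the
 * original source): period = roundup_pow_of_two(3000) = 4096 jiffies.
 * With elapsed = 200 jiffies and 2560 pages (10MB) written, the
 * instantaneous rate is 2560 * HZ / 200 = 12800 pages/s (50MB/s).  If
 * the previous write_bandwidth was 25600 pages/s (100MB/s):
 *
 *	bw = 2560000 + 25600 * (4096 - 200) = 102297600
 *	bw >>= 12  =>  24975 pages/s (~97.6MB/s)
 *
 * i.e. each 200ms sample moves the estimate only elapsed/period ~= 5%
 * of the way towards the instantaneous rate.
 */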

/*
 * The global dirtyable memory and dirty threshold could be suddenly knocked
 * down by a large amount (e.g. on the startup of KVM in a swapless system).
 * This may throw the system into deep dirty exceeded state and throttle
 * heavy/light dirtiers alike. To retain good responsiveness, maintain
 * global_dirty_limit, which tracks slowly down to the knocked-down dirty
 * threshold.
 */
static void update_dirty_limit(unsigned long thresh, unsigned long dirty)
{
	unsigned long limit = global_dirty_limit;

	/*
	 * Follow up in one step.
	 */
	if (limit < thresh) {
		limit = thresh;
		goto update;
	}

	/*
	 * Follow down slowly. Use the higher one as the target, because thresh
	 * may drop below dirty. This is exactly the reason to introduce
	 * global_dirty_limit which is guaranteed to lie above the dirty pages.
	 */
	thresh = max(thresh, dirty);
	if (limit > thresh) {
		limit -= (limit - thresh) >> 5;
		goto update;
	}
	return;
update:
	global_dirty_limit = limit;
}
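
/*
 * Worked example (illustrative numbers, not from the original source):
 * if global_dirty_limit = 400000 while max(thresh, dirty) = 200000, each
 * step trims the gap by 1/32: the first update lowers the limit by
 * 200000 >> 5 = 6250 pages to 393750.  At one step per BANDWIDTH_INTERVAL
 * (200ms) the gap halves roughly every 22 steps, i.e. every ~4.4 seconds.
 */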

static void global_update_bandwidth(unsigned long thresh,
				    unsigned long dirty,
				    unsigned long now)
{
	static DEFINE_SPINLOCK(dirty_lock);
	static unsigned long update_time;

	/*
	 * check locklessly first to optimize away locking most of the time
	 */
	if (time_before(now, update_time + BANDWIDTH_INTERVAL))
		return;

	spin_lock(&dirty_lock);
	if (time_after_eq(now, update_time + BANDWIDTH_INTERVAL)) {
		update_dirty_limit(thresh, dirty);
		update_time = now;
	}
	spin_unlock(&dirty_lock);
}

/*
 * Maintain bdi->dirty_ratelimit, the base dirty throttle rate.
 *
 * Normal bdi tasks will be curbed at or below it in the long term.
 * Obviously it should be around (write_bw / N) when there are N dd tasks.
 */
static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi,
				       unsigned long thresh,
				       unsigned long bg_thresh,
				       unsigned long dirty,
				       unsigned long bdi_thresh,
				       unsigned long bdi_dirty,
				       unsigned long dirtied,
				       unsigned long elapsed)
{
	unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh);
	unsigned long limit = hard_dirty_limit(thresh);
	unsigned long setpoint = (freerun + limit) / 2;
	unsigned long write_bw = bdi->avg_write_bandwidth;
	unsigned long dirty_ratelimit = bdi->dirty_ratelimit;
	unsigned long dirty_rate;
	unsigned long task_ratelimit;
	unsigned long balanced_dirty_ratelimit;
	unsigned long pos_ratio;
	unsigned long step;
	unsigned long x;

	/*
	 * The dirty rate will match the writeout rate in the long term,
	 * except when dirty pages are truncated by userspace or re-dirtied
	 * by the FS.
	 */
	dirty_rate = (dirtied - bdi->dirtied_stamp) * HZ / elapsed;

	pos_ratio = bdi_position_ratio(bdi, thresh, bg_thresh, dirty,
				       bdi_thresh, bdi_dirty);
	/*
	 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
	 */
	task_ratelimit = (u64)dirty_ratelimit *
					pos_ratio >> RATELIMIT_CALC_SHIFT;
	task_ratelimit++; /* it helps ramp up dirty_ratelimit from tiny values */

	/*
	 * A linear estimation of the "balanced" throttle rate. The theory is,
	 * if there are N dd tasks, each throttled at task_ratelimit, the bdi's
	 * dirty_rate will be measured to be (N * task_ratelimit). So the below
	 * formula will yield the balanced rate limit (write_bw / N).
	 *
	 * Note that the expanded form is not a pure rate feedback:
	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate)		     (1)
	 * but also takes pos_ratio into account:
	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio  (2)
	 *
	 * (1) is not realistic because pos_ratio also takes part in balancing
	 * the dirty rate.  Consider the state
	 *	pos_ratio = 0.5						     (3)
	 *	rate = 2 * (write_bw / N)				     (4)
	 * If (1) is used, it will get stuck in that state! Because each dd
	 * will be throttled at
	 *	task_ratelimit = pos_ratio * rate = (write_bw / N)	     (5)
	 * yielding
	 *	dirty_rate = N * task_ratelimit = write_bw		     (6)
	 * put (6) into (1) we get
	 *	rate_(i+1) = rate_(i)					     (7)
	 *
	 * So we end up using (2) to always keep
	 *	rate_(i+1) ~= (write_bw / N)				     (8)
	 * regardless of the value of pos_ratio. As long as (8) is satisfied,
	 * pos_ratio is able to drive itself to 1.0, which is not only where
	 * the dirty count meets the setpoint, but also where the slope of
	 * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
	 */
	balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
					   dirty_rate | 1);

	/*
	 * We could safely do this and return immediately:
	 *
	 *	bdi->dirty_ratelimit = balanced_dirty_ratelimit;
	 *
	 * However to get a more stable dirty_ratelimit, the below elaborated
	 * code makes use of task_ratelimit to filter out singular points and
	 * limit the step size.
	 *
	 * The below code essentially only uses the relative value of
	 *
	 *	task_ratelimit - dirty_ratelimit
	 *	= (pos_ratio - 1) * dirty_ratelimit
	 *
	 * which reflects the direction and size of the dirty position error.
	 */

	/*
	 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
	 * task_ratelimit is on the same side of dirty_ratelimit, too.
	 * For example, when
	 * - dirty_ratelimit > balanced_dirty_ratelimit
	 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
	 * lowering dirty_ratelimit will help meet both the position and rate
	 * control targets. Otherwise, don't update dirty_ratelimit if it will
	 * only help meet the rate target. After all, what the users ultimately
	 * feel and care about are a stable dirty rate and a small position
	 * error.
	 *
	 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
	 * and filter out the singular points of balanced_dirty_ratelimit,
	 * which keeps jumping around randomly and can even leap far away at
	 * times due to the small 200ms estimation period of dirty_rate (we
	 * want to keep that period small to reduce time lags).
	 */
	step = 0;
	if (dirty < setpoint) {
		x = min(bdi->balanced_dirty_ratelimit,
			 min(balanced_dirty_ratelimit, task_ratelimit));
		if (dirty_ratelimit < x)
			step = x - dirty_ratelimit;
	} else {
		x = max(bdi->balanced_dirty_ratelimit,
			 max(balanced_dirty_ratelimit, task_ratelimit));
		if (dirty_ratelimit > x)
			step = dirty_ratelimit - x;
	}

	/*
	 * Don't pursue 100% rate matching. It's impossible since the balanced
	 * rate itself is constantly fluctuating. So decrease the track speed
	 * when it gets close to the target. Helps eliminate pointless tremors.
	 */
	step >>= dirty_ratelimit / (2 * step + 1);
	/*
	 * Limit the tracking speed to avoid overshooting.
	 */
	step = (step + 7) / 8;

	if (dirty_ratelimit < balanced_dirty_ratelimit)
		dirty_ratelimit += step;
	else
		dirty_ratelimit -= step;

	bdi->dirty_ratelimit = max(dirty_ratelimit, 1UL);
	bdi->balanced_dirty_ratelimit = balanced_dirty_ratelimit;

	trace_bdi_dirty_ratelimit(bdi, dirty_rate, task_ratelimit);
}
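
/*
 * Worked example of formula (8) (illustrative numbers, not from the
 * original source): write_bw = 25600 pages/s and two dd tasks, each
 * currently throttled at task_ratelimit = 16000 pages/s.  The measured
 * dirty_rate is then ~32000 pages/s, so
 *
 *	balanced_dirty_ratelimit = 16000 * 25600 / 32000 = 12800
 *
 * which is exactly the (write_bw / N) target for N = 2.
 */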

void __bdi_update_bandwidth(struct backing_dev_info *bdi,
			    unsigned long thresh,
			    unsigned long bg_thresh,
			    unsigned long dirty,
			    unsigned long bdi_thresh,
			    unsigned long bdi_dirty,
			    unsigned long start_time)
{
	unsigned long now = jiffies;
	unsigned long elapsed = now - bdi->bw_time_stamp;
	unsigned long dirtied;
	unsigned long written;

	/*
	 * rate-limit, only update once every 200ms.
	 */
	if (elapsed < BANDWIDTH_INTERVAL)
		return;

	dirtied = percpu_counter_read(&bdi->bdi_stat[BDI_DIRTIED]);
	written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]);

	/*
	 * Skip quiet periods when disk bandwidth is under-utilized.
	 * (at least 1s idle time between two flusher runs)
	 */
	if (elapsed > HZ && time_before(bdi->bw_time_stamp, start_time))
		goto snapshot;

	if (thresh) {
		global_update_bandwidth(thresh, dirty, now);
		bdi_update_dirty_ratelimit(bdi, thresh, bg_thresh, dirty,
					   bdi_thresh, bdi_dirty,
					   dirtied, elapsed);
	}
	bdi_update_write_bandwidth(bdi, elapsed, written);

snapshot:
	bdi->dirtied_stamp = dirtied;
	bdi->written_stamp = written;
	bdi->bw_time_stamp = now;
}

static void bdi_update_bandwidth(struct backing_dev_info *bdi,
				 unsigned long thresh,
				 unsigned long bg_thresh,
				 unsigned long dirty,
				 unsigned long bdi_thresh,
				 unsigned long bdi_dirty,
				 unsigned long start_time)
{
	if (time_is_after_eq_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL))
		return;
	spin_lock(&bdi->wb.list_lock);
	__bdi_update_bandwidth(bdi, thresh, bg_thresh, dirty,
			       bdi_thresh, bdi_dirty, start_time);
	spin_unlock(&bdi->wb.list_lock);
}

/*
 * After a task has dirtied this many pages, balance_dirty_pages_ratelimited_nr()
 * will look to see if it needs to start dirty throttling.
 *
 * If dirty_poll_interval is too low, big NUMA machines will call the expensive
 * global_page_state() too often. So scale it near-sqrt to the safety margin
 * (the number of pages we may dirty without exceeding the dirty limits).
 */
static unsigned long dirty_poll_interval(unsigned long dirty,
					 unsigned long thresh)
{
	if (thresh > dirty)
		return 1UL << (ilog2(thresh - dirty) >> 1);

	return 1;
}
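
/*
 * Worked example (illustrative numbers, not from the original source):
 * with a safety margin of thresh - dirty = 100000 pages,
 * ilog2(100000) = 16 and the interval is 1 << (16 >> 1) = 256 pages,
 * close to sqrt(100000) ~= 316.  A margin of 1000 pages yields
 * 1 << (9 >> 1) = 16 pages between polls.
 */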

static unsigned long bdi_max_pause(struct backing_dev_info *bdi,
				   unsigned long bdi_dirty)
{
	unsigned long bw = bdi->avg_write_bandwidth;
	unsigned long hi = ilog2(bw);
	unsigned long lo = ilog2(bdi->dirty_ratelimit);
	unsigned long t;

	/* target for 20ms max pause on 1-dd case */
	t = HZ / 50;

	/*
	 * Scale up the pause time for concurrent dirtiers in order to reduce
	 * CPU overheads.
	 *
	 * (N * 20ms) on 2^N concurrent tasks.
	 */
	if (hi > lo)
		t += (hi - lo) * (20 * HZ) / 1024;

	/*
	 * Limit the pause time for small memory systems. If we sleep for too
	 * long, the small pool of dirty/writeback pages may go empty and the
	 * disk go idle.
	 *
	 * 8 serves as the safety ratio.
	 */
	t = min(t, bdi_dirty * HZ / (8 * bw + 1));

	/*
	 * The pause time will be settled within range (max_pause/4, max_pause).
	 * Apply a minimal value of 4 to get a non-zero max_pause/4.
	 */
	return clamp_val(t, 4, MAX_PAUSE);
}
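
/*
 * Worked example (illustrative numbers, assuming HZ = 1000, not from the
 * original source): bw = 25600 pages/s and dirty_ratelimit = 1600
 * pages/s give hi = 14, lo = 10, so
 *
 *	t = 1000/50 + (14 - 10) * (20 * 1000) / 1024 = 20 + 78 = 98
 *
 * With bdi_dirty = 51200 pages the small-memory clamp allows
 * 51200 * 1000 / (8 * 25600 + 1) = 249 jiffies, so the result is
 * clamp_val(98, 4, 200) = 98 jiffies, i.e. a max pause of ~98ms.
 */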
 | 1000 |  | 
| Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 1001 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1002 |  * balance_dirty_pages() must be called by processes which are generating dirty | 
 | 1003 |  * data.  It looks at the number of dirty pages in the machine and will force | 
| Wu Fengguang | 143dfe8 | 2010-08-27 18:45:12 -0600 | [diff] [blame] | 1004 |  * the caller to wait once crossing the (background_thresh + dirty_thresh) / 2. | 
| Jens Axboe | 5b0830c | 2009-09-23 19:37:09 +0200 | [diff] [blame] | 1005 |  * If we're over `background_thresh' then the writeback threads are woken to | 
 | 1006 |  * perform some writeout. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1007 |  */ | 
| Wu Fengguang | 3a2e9a5 | 2009-09-23 21:56:00 +0800 | [diff] [blame] | 1008 | static void balance_dirty_pages(struct address_space *mapping, | 
| Wu Fengguang | 143dfe8 | 2010-08-27 18:45:12 -0600 | [diff] [blame] | 1009 | 				unsigned long pages_dirtied) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1010 | { | 
| Wu Fengguang | 143dfe8 | 2010-08-27 18:45:12 -0600 | [diff] [blame] | 1011 | 	unsigned long nr_reclaimable;	/* = file_dirty + unstable_nfs */ | 
 | 1012 | 	unsigned long bdi_reclaimable; | 
| Wu Fengguang | 7762741 | 2010-09-12 13:34:05 -0600 | [diff] [blame] | 1013 | 	unsigned long nr_dirty;  /* = file_dirty + writeback + unstable_nfs */ | 
 | 1014 | 	unsigned long bdi_dirty; | 
| Wu Fengguang | 6c14ae1 | 2011-03-02 16:04:18 -0600 | [diff] [blame] | 1015 | 	unsigned long freerun; | 
| David Rientjes | 364aeb2 | 2009-01-06 14:39:29 -0800 | [diff] [blame] | 1016 | 	unsigned long background_thresh; | 
 | 1017 | 	unsigned long dirty_thresh; | 
 | 1018 | 	unsigned long bdi_thresh; | 
| Wu Fengguang | 143dfe8 | 2010-08-27 18:45:12 -0600 | [diff] [blame] | 1019 | 	long pause = 0; | 
| Wu Fengguang | 50657fc | 2011-10-11 17:06:33 -0600 | [diff] [blame] | 1020 | 	long uninitialized_var(max_pause); | 
| Wu Fengguang | e50e372 | 2010-08-11 14:17:37 -0700 | [diff] [blame] | 1021 | 	bool dirty_exceeded = false; | 
| Wu Fengguang | 143dfe8 | 2010-08-27 18:45:12 -0600 | [diff] [blame] | 1022 | 	unsigned long task_ratelimit; | 
| Wu Fengguang | 50657fc | 2011-10-11 17:06:33 -0600 | [diff] [blame] | 1023 | 	unsigned long uninitialized_var(dirty_ratelimit); | 
| Wu Fengguang | 143dfe8 | 2010-08-27 18:45:12 -0600 | [diff] [blame] | 1024 | 	unsigned long pos_ratio; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1025 | 	struct backing_dev_info *bdi = mapping->backing_dev_info; | 
| Wu Fengguang | e98be2d | 2010-08-29 11:22:30 -0600 | [diff] [blame] | 1026 | 	unsigned long start_time = jiffies; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1027 |  | 
 | 1028 | 	for (;;) { | 
| Wu Fengguang | 143dfe8 | 2010-08-27 18:45:12 -0600 | [diff] [blame] | 1029 | 		/* | 
 | 1030 | 		 * Unstable writes are a feature of certain networked | 
 | 1031 | 		 * filesystems (e.g. NFS) in which data may have been | 
 | 1032 | 		 * written to the server's write cache, but has not yet | 
 | 1033 | 		 * been flushed to permanent storage. | 
 | 1034 | 		 */ | 
| Peter Zijlstra | 5fce25a | 2007-11-14 16:59:15 -0800 | [diff] [blame] | 1035 | 		nr_reclaimable = global_page_state(NR_FILE_DIRTY) + | 
 | 1036 | 					global_page_state(NR_UNSTABLE_NFS); | 
| Wu Fengguang | 7762741 | 2010-09-12 13:34:05 -0600 | [diff] [blame] | 1037 | 		nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK); | 
| Peter Zijlstra | 5fce25a | 2007-11-14 16:59:15 -0800 | [diff] [blame] | 1038 |  | 
| Wu Fengguang | 16c4042 | 2010-08-11 14:17:39 -0700 | [diff] [blame] | 1039 | 		global_dirty_limits(&background_thresh, &dirty_thresh); | 
 | 1040 |  | 
 | 1041 | 		/* | 
 | 1042 | 		 * Throttle it only when the background writeback cannot | 
 | 1043 | 		 * catch up. This avoids (excessively) small writeouts | 
 | 1044 | 		 * when the bdi limits are ramping up. | 
 | 1045 | 		 */ | 
| Wu Fengguang | 6c14ae1 | 2011-03-02 16:04:18 -0600 | [diff] [blame] | 1046 | 		freerun = dirty_freerun_ceiling(dirty_thresh, | 
 | 1047 | 						background_thresh); | 
 | 1048 | 		if (nr_dirty <= freerun) | 
| Wu Fengguang | 16c4042 | 2010-08-11 14:17:39 -0700 | [diff] [blame] | 1049 | 			break; | 
 | 1050 |  | 
| Wu Fengguang | 143dfe8 | 2010-08-27 18:45:12 -0600 | [diff] [blame] | 1051 | 		if (unlikely(!writeback_in_progress(bdi))) | 
 | 1052 | 			bdi_start_background_writeback(bdi); | 
 | 1053 |  | 
 | 1054 | 		/* | 
 | 1055 | 		 * bdi_thresh is not treated as a hard limit the way | 
 | 1056 | 		 * dirty_thresh is, for two reasons: | 
 | 1057 | 		 * - in JBOD setup, bdi_thresh can fluctuate a lot | 
 | 1058 | 		 * - in a system with HDD and USB key, the USB key may somehow | 
 | 1059 | 		 *   go into state (bdi_dirty >> bdi_thresh) either because | 
 | 1060 | 		 *   bdi_dirty starts high, or because bdi_thresh drops low. | 
 | 1061 | 		 *   In this case we don't want to hard throttle the USB key | 
 | 1062 | 		 *   dirtiers for 100 seconds until bdi_dirty drops under | 
 | 1063 | 		 *   bdi_thresh. Instead the auxiliary bdi control line in | 
 | 1064 | 		 *   bdi_position_ratio() will let the dirtier task progress | 
 | 1065 | 		 *   at some rate <= (write_bw / 2) for bringing down bdi_dirty. | 
 | 1066 | 		 */ | 
| Wu Fengguang | 16c4042 | 2010-08-11 14:17:39 -0700 | [diff] [blame] | 1067 | 		bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh); | 
| Wu Fengguang | 16c4042 | 2010-08-11 14:17:39 -0700 | [diff] [blame] | 1068 |  | 
| Wu Fengguang | e50e372 | 2010-08-11 14:17:37 -0700 | [diff] [blame] | 1069 | 		/* | 
 | 1070 | 		 * In order to avoid the stacked BDI deadlock we need | 
 | 1071 | 		 * to ensure we accurately count the 'dirty' pages when | 
 | 1072 | 		 * the threshold is low. | 
 | 1073 | 		 * | 
 | 1074 | 		 * Otherwise it would be possible to get thresh+n pages | 
 | 1075 | 		 * reported dirty, even though there are thresh-m pages | 
 | 1076 | 		 * actually dirty; with m+n sitting in the percpu | 
 | 1077 | 		 * deltas. | 
 | 1078 | 		 */ | 
| Wu Fengguang | 143dfe8 | 2010-08-27 18:45:12 -0600 | [diff] [blame] | 1079 | 		if (bdi_thresh < 2 * bdi_stat_error(bdi)) { | 
 | 1080 | 			bdi_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE); | 
 | 1081 | 			bdi_dirty = bdi_reclaimable + | 
| Wu Fengguang | 7762741 | 2010-09-12 13:34:05 -0600 | [diff] [blame] | 1082 | 				    bdi_stat_sum(bdi, BDI_WRITEBACK); | 
| Wu Fengguang | e50e372 | 2010-08-11 14:17:37 -0700 | [diff] [blame] | 1083 | 		} else { | 
| Wu Fengguang | 143dfe8 | 2010-08-27 18:45:12 -0600 | [diff] [blame] | 1084 | 			bdi_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE); | 
 | 1085 | 			bdi_dirty = bdi_reclaimable + | 
| Wu Fengguang | 7762741 | 2010-09-12 13:34:05 -0600 | [diff] [blame] | 1086 | 				    bdi_stat(bdi, BDI_WRITEBACK); | 
| Wu Fengguang | e50e372 | 2010-08-11 14:17:37 -0700 | [diff] [blame] | 1087 | 		} | 
| Peter Zijlstra | 5fce25a | 2007-11-14 16:59:15 -0800 | [diff] [blame] | 1088 |  | 
| Wu Fengguang | 143dfe8 | 2010-08-27 18:45:12 -0600 | [diff] [blame] | 1089 | 		dirty_exceeded = (bdi_dirty > bdi_thresh) || | 
| Wu Fengguang | 7762741 | 2010-09-12 13:34:05 -0600 | [diff] [blame] | 1090 | 				  (nr_dirty > dirty_thresh); | 
| Wu Fengguang | 143dfe8 | 2010-08-27 18:45:12 -0600 | [diff] [blame] | 1091 | 		if (dirty_exceeded && !bdi->dirty_exceeded) | 
| Peter Zijlstra | 04fbfdc | 2007-10-16 23:25:50 -0700 | [diff] [blame] | 1092 | 			bdi->dirty_exceeded = 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1093 |  | 
| Wu Fengguang | af6a311 | 2011-10-03 20:46:17 -0600 | [diff] [blame] | 1094 | 		bdi_update_bandwidth(bdi, dirty_thresh, background_thresh, | 
 | 1095 | 				     nr_dirty, bdi_thresh, bdi_dirty, | 
 | 1096 | 				     start_time); | 
| Wu Fengguang | e98be2d | 2010-08-29 11:22:30 -0600 | [diff] [blame] | 1097 |  | 
| Wu Fengguang | c8462cc | 2011-06-11 19:21:43 -0600 | [diff] [blame] | 1098 | 		max_pause = bdi_max_pause(bdi, bdi_dirty); | 
 | 1099 |  | 
| Wu Fengguang | 143dfe8 | 2010-08-27 18:45:12 -0600 | [diff] [blame] | 1100 | 		dirty_ratelimit = bdi->dirty_ratelimit; | 
 | 1101 | 		pos_ratio = bdi_position_ratio(bdi, dirty_thresh, | 
 | 1102 | 					       background_thresh, nr_dirty, | 
 | 1103 | 					       bdi_thresh, bdi_dirty); | 
| Wu Fengguang | 3a73dbb | 2011-11-07 19:19:28 +0800 | [diff] [blame] | 1104 | 		task_ratelimit = ((u64)dirty_ratelimit * pos_ratio) >> | 
 | 1105 | 							RATELIMIT_CALC_SHIFT; | 
 | 1106 | 		if (unlikely(task_ratelimit == 0)) { | 
| Wu Fengguang | c8462cc | 2011-06-11 19:21:43 -0600 | [diff] [blame] | 1107 | 			pause = max_pause; | 
| Wu Fengguang | 143dfe8 | 2010-08-27 18:45:12 -0600 | [diff] [blame] | 1108 | 			goto pause; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1109 | 		} | 
| Wu Fengguang | 3a73dbb | 2011-11-07 19:19:28 +0800 | [diff] [blame] | 1110 | 		pause = HZ * pages_dirtied / task_ratelimit; | 
| Wu Fengguang | 57fc978 | 2011-06-11 19:32:32 -0600 | [diff] [blame] | 1111 | 		if (unlikely(pause <= 0)) { | 
| Wu Fengguang | ece13ac | 2010-08-29 23:33:20 -0600 | [diff] [blame] | 1112 | 			trace_balance_dirty_pages(bdi, | 
 | 1113 | 						  dirty_thresh, | 
 | 1114 | 						  background_thresh, | 
 | 1115 | 						  nr_dirty, | 
 | 1116 | 						  bdi_thresh, | 
 | 1117 | 						  bdi_dirty, | 
 | 1118 | 						  dirty_ratelimit, | 
 | 1119 | 						  task_ratelimit, | 
 | 1120 | 						  pages_dirtied, | 
 | 1121 | 						  pause, | 
 | 1122 | 						  start_time); | 
| Wu Fengguang | 57fc978 | 2011-06-11 19:32:32 -0600 | [diff] [blame] | 1123 | 			pause = 1; /* avoid resetting nr_dirtied_pause below */ | 
 | 1124 | 			break; | 
 | 1125 | 		} | 
| Wu Fengguang | c8462cc | 2011-06-11 19:21:43 -0600 | [diff] [blame] | 1126 | 		pause = min(pause, max_pause); | 
| Wu Fengguang | 143dfe8 | 2010-08-27 18:45:12 -0600 | [diff] [blame] | 1127 |  | 
 | 1128 | pause: | 
| Wu Fengguang | ece13ac | 2010-08-29 23:33:20 -0600 | [diff] [blame] | 1129 | 		trace_balance_dirty_pages(bdi, | 
 | 1130 | 					  dirty_thresh, | 
 | 1131 | 					  background_thresh, | 
 | 1132 | 					  nr_dirty, | 
 | 1133 | 					  bdi_thresh, | 
 | 1134 | 					  bdi_dirty, | 
 | 1135 | 					  dirty_ratelimit, | 
 | 1136 | 					  task_ratelimit, | 
 | 1137 | 					  pages_dirtied, | 
 | 1138 | 					  pause, | 
 | 1139 | 					  start_time); | 
| Jan Kara | 499d05e | 2011-11-16 19:34:48 +0800 | [diff] [blame] | 1140 | 		__set_current_state(TASK_KILLABLE); | 
| Wu Fengguang | d25105e | 2009-10-09 12:40:42 +0200 | [diff] [blame] | 1141 | 		io_schedule_timeout(pause); | 
| Jens Axboe | 87c6a9b | 2009-09-17 19:59:14 +0200 | [diff] [blame] | 1142 |  | 
| Wu Fengguang | ffd1f60 | 2011-06-19 22:18:42 -0600 | [diff] [blame] | 1143 | 		/* | 
| Wu Fengguang | 1df6471 | 2011-11-13 19:47:32 -0600 | [diff] [blame] | 1144 | 		 * This is typically equal to (nr_dirty < dirty_thresh) and can | 
 | 1145 | 		 * also keep "1000+ dd on a slow USB stick" under control. | 
| Wu Fengguang | ffd1f60 | 2011-06-19 22:18:42 -0600 | [diff] [blame] | 1146 | 		 */ | 
| Wu Fengguang | 1df6471 | 2011-11-13 19:47:32 -0600 | [diff] [blame] | 1147 | 		if (task_ratelimit) | 
| Wu Fengguang | ffd1f60 | 2011-06-19 22:18:42 -0600 | [diff] [blame] | 1148 | 			break; | 
| Jan Kara | 499d05e | 2011-11-16 19:34:48 +0800 | [diff] [blame] | 1149 |  | 
| Wu Fengguang | c5c6343 | 2011-12-02 10:21:33 -0600 | [diff] [blame] | 1150 | 		/* | 
 | 1151 | 		 * In the case of an unresponsive NFS server whose dirty | 
 | 1152 | 		 * pages exceed dirty_thresh, give the other, healthy bdis a | 
 | 1153 | 		 * pipe to go through, so that tasks on them stay responsive. | 
 | 1154 | 		 * | 
 | 1155 | 		 * In theory 1 page is enough to keep the consumer-producer | 
 | 1156 | 		 * pipe going: the flusher cleans 1 page => the task dirties 1 | 
 | 1157 | 		 * more page. However bdi_dirty has accounting errors.  So use | 
 | 1158 | 		 * the larger and more IO friendly bdi_stat_error. | 
 | 1159 | 		 */ | 
 | 1160 | 		if (bdi_dirty <= bdi_stat_error(bdi)) | 
 | 1161 | 			break; | 
 | 1162 |  | 
| Jan Kara | 499d05e | 2011-11-16 19:34:48 +0800 | [diff] [blame] | 1163 | 		if (fatal_signal_pending(current)) | 
 | 1164 | 			break; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1165 | 	} | 
 | 1166 |  | 
| Wu Fengguang | 143dfe8 | 2010-08-27 18:45:12 -0600 | [diff] [blame] | 1167 | 	if (!dirty_exceeded && bdi->dirty_exceeded) | 
| Peter Zijlstra | 04fbfdc | 2007-10-16 23:25:50 -0700 | [diff] [blame] | 1168 | 		bdi->dirty_exceeded = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1169 |  | 
| Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 1170 | 	current->nr_dirtied = 0; | 
| Wu Fengguang | 57fc978 | 2011-06-11 19:32:32 -0600 | [diff] [blame] | 1171 | 	if (pause == 0) { /* in freerun area */ | 
 | 1172 | 		current->nr_dirtied_pause = | 
 | 1173 | 				dirty_poll_interval(nr_dirty, dirty_thresh); | 
 | 1174 | 	} else if (pause <= max_pause / 4 && | 
 | 1175 | 		   pages_dirtied >= current->nr_dirtied_pause) { | 
 | 1176 | 		current->nr_dirtied_pause = clamp_val( | 
 | 1177 | 					dirty_ratelimit * (max_pause / 2) / HZ, | 
 | 1178 | 					pages_dirtied + pages_dirtied / 8, | 
 | 1179 | 					pages_dirtied * 4); | 
 | 1180 | 	} else if (pause >= max_pause) { | 
 | 1181 | 		current->nr_dirtied_pause = 1 | clamp_val( | 
 | 1182 | 					dirty_ratelimit * (max_pause / 2) / HZ, | 
 | 1183 | 					pages_dirtied / 4, | 
 | 1184 | 					pages_dirtied - pages_dirtied / 8); | 
 | 1185 | 	} | 
| Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 1186 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1187 | 	if (writeback_in_progress(bdi)) | 
| Jens Axboe | 5b0830c | 2009-09-23 19:37:09 +0200 | [diff] [blame] | 1188 | 		return; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1189 |  | 
 | 1190 | 	/* | 
 | 1191 | 	 * In laptop mode, we wait until hitting the higher threshold before | 
 | 1192 | 	 * starting background writeout, and then write out all the way down | 
 | 1193 | 	 * to the lower threshold.  So slow writers cause minimal disk activity. | 
 | 1194 | 	 * | 
 | 1195 | 	 * In normal mode, we start background writeout at the lower | 
 | 1196 | 	 * background_thresh, to keep the amount of dirty memory low. | 
 | 1197 | 	 */ | 
| Wu Fengguang | 143dfe8 | 2010-08-27 18:45:12 -0600 | [diff] [blame] | 1198 | 	if (laptop_mode) | 
 | 1199 | 		return; | 
 | 1200 |  | 
 | 1201 | 	if (nr_reclaimable > background_thresh) | 
| Christoph Hellwig | c544419 | 2010-06-08 18:15:15 +0200 | [diff] [blame] | 1202 | 		bdi_start_background_writeback(bdi); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1203 | } | 
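 |  |  | 
 |  | /* | 
 |  |  * Worked example of the pause arithmetic above (assuming HZ = 1000; all | 
 |  |  * figures are illustrative): with dirty_ratelimit = 2048 pages/s and | 
 |  |  * pos_ratio = 512 (i.e. 0.5 in RATELIMIT_CALC_SHIFT fixed point), | 
 |  |  * task_ratelimit = (2048 * 512) >> 10 = 1024 pages/s. A task that has | 
 |  |  * just dirtied pages_dirtied = 32 pages then sleeps for | 
 |  |  * pause = HZ * 32 / 1024 = 31 ticks (~31ms), capped at max_pause. | 
 |  |  */ | 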
 | 1204 |  | 
| Peter Zijlstra | a200ee1 | 2007-10-08 18:54:37 +0200 | [diff] [blame] | 1205 | void set_page_dirty_balance(struct page *page, int page_mkwrite) | 
| Peter Zijlstra | edc79b2 | 2006-09-25 23:30:58 -0700 | [diff] [blame] | 1206 | { | 
| Peter Zijlstra | a200ee1 | 2007-10-08 18:54:37 +0200 | [diff] [blame] | 1207 | 	if (set_page_dirty(page) || page_mkwrite) { | 
| Peter Zijlstra | edc79b2 | 2006-09-25 23:30:58 -0700 | [diff] [blame] | 1208 | 		struct address_space *mapping = page_mapping(page); | 
 | 1209 |  | 
 | 1210 | 		if (mapping) | 
 | 1211 | 			balance_dirty_pages_ratelimited(mapping); | 
 | 1212 | 	} | 
 | 1213 | } | 
 | 1214 |  | 
| Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 1215 | static DEFINE_PER_CPU(int, bdp_ratelimits); | 
| Tejun Heo | 245b2e7 | 2009-06-24 15:13:48 +0900 | [diff] [blame] | 1216 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1217 | /** | 
| Andrew Morton | fa5a734 | 2006-03-24 03:18:10 -0800 | [diff] [blame] | 1218 |  * balance_dirty_pages_ratelimited_nr - balance dirty memory state | 
| Martin Waitz | 67be2dd | 2005-05-01 08:59:26 -0700 | [diff] [blame] | 1219 |  * @mapping: address_space which was dirtied | 
| Martin Waitz | a580290 | 2006-04-02 13:59:55 +0200 | [diff] [blame] | 1220 |  * @nr_pages_dirtied: number of pages which the caller has just dirtied | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1221 |  * | 
 | 1222 |  * Processes which are dirtying memory should call in here once for each page | 
 | 1223 |  * which was newly dirtied.  The function will periodically check the system's | 
 | 1224 |  * dirty state and will initiate writeback if needed. | 
 | 1225 |  * | 
 | 1226 |  * On really big machines, checking the global dirty state is expensive, so | 
 | 1227 |  * try to avoid doing it too often (ratelimiting).  But once we're over the | 
 | 1228 |  * dirty memory limit we decrease the ratelimiting by a lot, to prevent | 
 | 1229 |  * individual processes from overshooting the limit by (ratelimit_pages) each. | 
 | 1230 |  */ | 
| Andrew Morton | fa5a734 | 2006-03-24 03:18:10 -0800 | [diff] [blame] | 1231 | void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, | 
 | 1232 | 					unsigned long nr_pages_dirtied) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1233 | { | 
| Wu Fengguang | 36715ce | 2011-06-11 17:53:57 -0600 | [diff] [blame] | 1234 | 	struct backing_dev_info *bdi = mapping->backing_dev_info; | 
| Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 1235 | 	int ratelimit; | 
 | 1236 | 	int *p; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1237 |  | 
| Wu Fengguang | 36715ce | 2011-06-11 17:53:57 -0600 | [diff] [blame] | 1238 | 	if (!bdi_cap_account_dirty(bdi)) | 
 | 1239 | 		return; | 
 | 1240 |  | 
| Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 1241 | 	ratelimit = current->nr_dirtied_pause; | 
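 |  | 	/* | 
 |  | 	 * A task over the dirty limits re-enters balance_dirty_pages() | 
 |  | 	 * after at most 32KB worth of pages: 32 >> (PAGE_SHIFT - 10) is | 
 |  | 	 * 8 pages with 4KB pages. | 
 |  | 	 */ | 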
 | 1242 | 	if (bdi->dirty_exceeded) | 
 | 1243 | 		ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1244 |  | 
| Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 1245 | 	current->nr_dirtied += nr_pages_dirtied; | 
 | 1246 |  | 
| Andrew Morton | fa5a734 | 2006-03-24 03:18:10 -0800 | [diff] [blame] | 1247 | 	preempt_disable(); | 
| Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 1248 | 	/* | 
 | 1249 | 	 * This prevents one CPU from accumulating too many dirtied pages | 
 | 1250 | 	 * without calling into balance_dirty_pages(), which can happen when | 
 | 1251 | 	 * 1000+ tasks all start dirtying pages at exactly the same time, | 
 | 1252 | 	 * each honouring a too-large initial task->nr_dirtied_pause. | 
 | 1253 | 	 */ | 
| Tejun Heo | 245b2e7 | 2009-06-24 15:13:48 +0900 | [diff] [blame] | 1254 | 	p = &__get_cpu_var(bdp_ratelimits); | 
| Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 1255 | 	if (unlikely(current->nr_dirtied >= ratelimit)) | 
| Andrew Morton | fa5a734 | 2006-03-24 03:18:10 -0800 | [diff] [blame] | 1256 | 		*p = 0; | 
| Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 1257 | 	else { | 
 | 1258 | 		*p += nr_pages_dirtied; | 
 | 1259 | 		if (unlikely(*p >= ratelimit_pages)) { | 
 | 1260 | 			*p = 0; | 
 | 1261 | 			ratelimit = 0; | 
 | 1262 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1263 | 	} | 
| Andrew Morton | fa5a734 | 2006-03-24 03:18:10 -0800 | [diff] [blame] | 1264 | 	preempt_enable(); | 
| Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 1265 |  | 
 | 1266 | 	if (unlikely(current->nr_dirtied >= ratelimit)) | 
 | 1267 | 		balance_dirty_pages(mapping, current->nr_dirtied); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1268 | } | 
| Andrew Morton | fa5a734 | 2006-03-24 03:18:10 -0800 | [diff] [blame] | 1269 | EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr); | 
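 |  |  | 
 |  | /* | 
 |  |  * A minimal usage sketch (example_dirty_range() is hypothetical, not an | 
 |  |  * in-tree function): a write path that dirties page cache pages calls the | 
 |  |  * ratelimited balancer once per dirtied page, as prescribed above. | 
 |  |  */ | 
 |  | static void example_dirty_range(struct address_space *mapping, | 
 |  | 				pgoff_t first, pgoff_t last) | 
 |  | { | 
 |  | 	pgoff_t index; | 
 |  |  | 
 |  | 	for (index = first; index <= last; index++) { | 
 |  | 		/* ... find or create the page, copy data, dirty it ... */ | 
 |  | 		balance_dirty_pages_ratelimited(mapping); | 
 |  | 	} | 
 |  | } | 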
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1270 |  | 
| Andrew Morton | 232ea4d | 2007-02-28 20:13:21 -0800 | [diff] [blame] | 1271 | void throttle_vm_writeout(gfp_t gfp_mask) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1272 | { | 
| David Rientjes | 364aeb2 | 2009-01-06 14:39:29 -0800 | [diff] [blame] | 1273 | 	unsigned long background_thresh; | 
 | 1274 | 	unsigned long dirty_thresh; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1275 |  | 
 | 1276 |         for ( ; ; ) { | 
| Wu Fengguang | 16c4042 | 2010-08-11 14:17:39 -0700 | [diff] [blame] | 1277 | 		global_dirty_limits(&background_thresh, &dirty_thresh); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1278 |  | 
 | 1279 | 		/* | 
 | 1280 | 		 * Boost the allowable dirty threshold a bit for page | 
 | 1281 | 		 * allocators so they don't get DoS'ed by heavy writers | 
 | 1282 | 		 */ | 
 | 1283 | 		dirty_thresh += dirty_thresh / 10;      /* wheeee... */ | 
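 |  | 		/* e.g. a dirty_thresh of 100000 pages becomes 110000 */ | 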
 | 1284 |  | 
| Christoph Lameter | c24f21b | 2006-06-30 01:55:42 -0700 | [diff] [blame] | 1285 | 		if (global_page_state(NR_UNSTABLE_NFS) + | 
 | 1286 | 			global_page_state(NR_WRITEBACK) <= dirty_thresh) | 
 | 1287 | 			break; | 
| Jens Axboe | 8aa7e84 | 2009-07-09 14:52:32 +0200 | [diff] [blame] | 1288 | 		congestion_wait(BLK_RW_ASYNC, HZ/10); | 
| Fengguang Wu | 369f238 | 2007-10-16 23:30:45 -0700 | [diff] [blame] | 1289 |  | 
 | 1290 | 		/* | 
 | 1291 | 		 * The caller might hold locks which can prevent IO completion | 
 | 1292 | 		 * or progress in the filesystem.  So we cannot just sit here | 
 | 1293 | 		 * waiting for IO to complete. | 
 | 1294 | 		 */ | 
 | 1295 | 		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO)) | 
 | 1296 | 			break; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1297 | 	} | 
 | 1298 | } | 
 | 1299 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1300 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1301 |  * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs | 
 | 1302 |  */ | 
 | 1303 | int dirty_writeback_centisecs_handler(ctl_table *table, int write, | 
| Alexey Dobriyan | 8d65af7 | 2009-09-23 15:57:19 -0700 | [diff] [blame] | 1304 | 	void __user *buffer, size_t *length, loff_t *ppos) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1305 | { | 
| Alexey Dobriyan | 8d65af7 | 2009-09-23 15:57:19 -0700 | [diff] [blame] | 1306 | 	proc_dointvec(table, write, buffer, length, ppos); | 
| Jens Axboe | 6423104 | 2010-05-21 20:00:35 +0200 | [diff] [blame] | 1307 | 	bdi_arm_supers_timer(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1308 | 	return 0; | 
 | 1309 | } | 
 | 1310 |  | 
| Jens Axboe | c2c4986 | 2010-05-20 09:18:47 +0200 | [diff] [blame] | 1311 | #ifdef CONFIG_BLOCK | 
| Matthew Garrett | 31373d0 | 2010-04-06 14:25:14 +0200 | [diff] [blame] | 1312 | void laptop_mode_timer_fn(unsigned long data) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1313 | { | 
| Matthew Garrett | 31373d0 | 2010-04-06 14:25:14 +0200 | [diff] [blame] | 1314 | 	struct request_queue *q = (struct request_queue *)data; | 
 | 1315 | 	int nr_pages = global_page_state(NR_FILE_DIRTY) + | 
 | 1316 | 		global_page_state(NR_UNSTABLE_NFS); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1317 |  | 
| Matthew Garrett | 31373d0 | 2010-04-06 14:25:14 +0200 | [diff] [blame] | 1318 | 	/* | 
 | 1319 | 	 * We want to write everything out, not just down to the dirty | 
 | 1320 | 	 * threshold | 
 | 1321 | 	 */ | 
| Matthew Garrett | 31373d0 | 2010-04-06 14:25:14 +0200 | [diff] [blame] | 1322 | 	if (bdi_has_dirty_io(&q->backing_dev_info)) | 
| Curt Wohlgemuth | 0e175a1 | 2011-10-07 21:54:10 -0600 | [diff] [blame] | 1323 | 		bdi_start_writeback(&q->backing_dev_info, nr_pages, | 
 | 1324 | 					WB_REASON_LAPTOP_TIMER); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1325 | } | 
 | 1326 |  | 
 | 1327 | /* | 
 | 1328 |  * We've spun up the disk and we're in laptop mode: schedule writeback | 
 | 1329 |  * of all dirty data a few seconds from now.  If the flush is already scheduled | 
 | 1330 |  * then push it back - the user is still using the disk. | 
 | 1331 |  */ | 
| Matthew Garrett | 31373d0 | 2010-04-06 14:25:14 +0200 | [diff] [blame] | 1332 | void laptop_io_completion(struct backing_dev_info *info) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1333 | { | 
| Matthew Garrett | 31373d0 | 2010-04-06 14:25:14 +0200 | [diff] [blame] | 1334 | 	mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1335 | } | 
 | 1336 |  | 
 | 1337 | /* | 
 | 1338 |  * We're in laptop mode and we've just synced. The sync's writes will have | 
 | 1339 |  * caused another writeback to be scheduled by laptop_io_completion. | 
 | 1340 |  * Nothing needs to be written back anymore, so we unschedule the writeback. | 
 | 1341 |  */ | 
 | 1342 | void laptop_sync_completion(void) | 
 | 1343 | { | 
| Matthew Garrett | 31373d0 | 2010-04-06 14:25:14 +0200 | [diff] [blame] | 1344 | 	struct backing_dev_info *bdi; | 
 | 1345 |  | 
 | 1346 | 	rcu_read_lock(); | 
 | 1347 |  | 
 | 1348 | 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) | 
 | 1349 | 		del_timer(&bdi->laptop_mode_wb_timer); | 
 | 1350 |  | 
 | 1351 | 	rcu_read_unlock(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1352 | } | 
| Jens Axboe | c2c4986 | 2010-05-20 09:18:47 +0200 | [diff] [blame] | 1353 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1354 |  | 
 | 1355 | /* | 
 | 1356 |  * If ratelimit_pages is too high then we can get into dirty-data overload | 
 | 1357 |  * if a large number of processes all perform writes at the same time. | 
 | 1358 |  * If it is too low then SMP machines will check the (expensive) | 
 | 1359 |  * global dirty state too often. | 
 | 1360 |  * | 
 | 1361 |  * Here we set ratelimit_pages to a level which ensures that when all CPUs are | 
 | 1362 |  * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory | 
| Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 1363 |  * thresholds. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1364 |  */ | 
 | 1365 |  | 
| Chandra Seetharaman | 2d1d43f | 2006-09-29 02:01:25 -0700 | [diff] [blame] | 1366 | void writeback_set_ratelimit(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1367 | { | 
| Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 1368 | 	unsigned long background_thresh; | 
 | 1369 | 	unsigned long dirty_thresh; | 
 | 1370 | 	global_dirty_limits(&background_thresh, &dirty_thresh); | 
 | 1371 | 	ratelimit_pages = dirty_thresh / (num_online_cpus() * 32); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1372 | 	if (ratelimit_pages < 16) | 
 | 1373 | 		ratelimit_pages = 16; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1374 | } | 
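 |  |  | 
 |  | /* | 
 |  |  * Worked example (illustrative numbers): with dirty_thresh = 200000 pages | 
 |  |  * (~800MB at 4KB pages) and 8 online CPUs, ratelimit_pages = | 
 |  |  * 200000 / (8 * 32) = 781. Even if every CPU dirties its full quota | 
 |  |  * before rechecking, the overshoot is bounded by 8 * 781 = 6248 pages, | 
 |  |  * about 3% of the threshold. | 
 |  |  */ | 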
 | 1375 |  | 
| Chandra Seetharaman | 26c2143 | 2006-06-27 02:54:10 -0700 | [diff] [blame] | 1376 | static int __cpuinit | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1377 | ratelimit_handler(struct notifier_block *self, unsigned long u, void *v) | 
 | 1378 | { | 
| Chandra Seetharaman | 2d1d43f | 2006-09-29 02:01:25 -0700 | [diff] [blame] | 1379 | 	writeback_set_ratelimit(); | 
| Paul E. McKenney | aa0f030 | 2007-02-10 01:46:37 -0800 | [diff] [blame] | 1380 | 	return NOTIFY_DONE; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1381 | } | 
 | 1382 |  | 
| Chandra Seetharaman | 74b85f3 | 2006-06-27 02:54:09 -0700 | [diff] [blame] | 1383 | static struct notifier_block __cpuinitdata ratelimit_nb = { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1384 | 	.notifier_call	= ratelimit_handler, | 
 | 1385 | 	.next		= NULL, | 
 | 1386 | }; | 
 | 1387 |  | 
 | 1388 | /* | 
| Linus Torvalds | dc6e29d | 2007-01-29 16:37:38 -0800 | [diff] [blame] | 1389 |  * Called early on to tune the page writeback dirty limits. | 
 | 1390 |  * | 
 | 1391 |  * We used to scale the dirty limits according to how much of | 
 | 1392 |  * total memory could be allocated for buffers (by comparing | 
 | 1393 |  * nr_free_buffer_pages() to vm_total_pages). | 
 | 1394 |  * | 
 | 1395 |  * However, that was when we used "dirty_ratio" to scale with | 
 | 1396 |  * all memory, and we don't do that any more. "dirty_ratio" | 
 | 1397 |  * is now applied to total non-HIGHMEM memory (by subtracting | 
 | 1398 |  * totalhigh_pages from vm_total_pages), and as such we can't | 
 | 1399 |  * get into the old insane situation any more where we had | 
 | 1400 |  * large amounts of dirty pages compared to a small amount of | 
 | 1401 |  * non-HIGHMEM memory. | 
 | 1402 |  * | 
 | 1403 |  * But we might still want to scale the dirty_ratio by how | 
 | 1404 |  * much memory the box has.. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1405 |  */ | 
 | 1406 | void __init page_writeback_init(void) | 
 | 1407 | { | 
| Peter Zijlstra | 04fbfdc | 2007-10-16 23:25:50 -0700 | [diff] [blame] | 1408 | 	int shift; | 
 | 1409 |  | 
| Chandra Seetharaman | 2d1d43f | 2006-09-29 02:01:25 -0700 | [diff] [blame] | 1410 | 	writeback_set_ratelimit(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1411 | 	register_cpu_notifier(&ratelimit_nb); | 
| Peter Zijlstra | 04fbfdc | 2007-10-16 23:25:50 -0700 | [diff] [blame] | 1412 |  | 
 | 1413 | 	shift = calc_period_shift(); | 
 | 1414 | 	prop_descriptor_init(&vm_completions, shift); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1415 | } | 
 | 1416 |  | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1417 | /** | 
| Jan Kara | f446daa | 2010-08-09 17:19:12 -0700 | [diff] [blame] | 1418 |  * tag_pages_for_writeback - tag pages to be written by write_cache_pages | 
 | 1419 |  * @mapping: address space structure to write | 
 | 1420 |  * @start: starting page index | 
 | 1421 |  * @end: ending page index (inclusive) | 
 | 1422 |  * | 
 | 1423 |  * This function scans the page range from @start to @end (inclusive) and tags | 
 | 1424 |  * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is | 
 | 1425 |  * that write_cache_pages (or whoever calls this function) will then use | 
 | 1426 |  * TOWRITE tag to identify pages eligible for writeback.  This mechanism is | 
 | 1427 |  * used to avoid livelocking of writeback by a process steadily creating new | 
 | 1428 |  * dirty pages in the file (thus it is important for this function to be quick | 
 | 1429 |  * so that it can tag pages faster than a dirtying process can create them). | 
 | 1430 |  */ | 
 | 1431 | /* | 
 | 1432 |  * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency. | 
 | 1433 |  */ | 
| Jan Kara | f446daa | 2010-08-09 17:19:12 -0700 | [diff] [blame] | 1434 | void tag_pages_for_writeback(struct address_space *mapping, | 
 | 1435 | 			     pgoff_t start, pgoff_t end) | 
 | 1436 | { | 
| Randy Dunlap | 3c111a0 | 2010-08-11 14:17:30 -0700 | [diff] [blame] | 1437 | #define WRITEBACK_TAG_BATCH 4096 | 
| Jan Kara | f446daa | 2010-08-09 17:19:12 -0700 | [diff] [blame] | 1438 | 	unsigned long tagged; | 
 | 1439 |  | 
 | 1440 | 	do { | 
 | 1441 | 		spin_lock_irq(&mapping->tree_lock); | 
 | 1442 | 		tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree, | 
 | 1443 | 				&start, end, WRITEBACK_TAG_BATCH, | 
 | 1444 | 				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE); | 
 | 1445 | 		spin_unlock_irq(&mapping->tree_lock); | 
 | 1446 | 		WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH); | 
 | 1447 | 		cond_resched(); | 
| Jan Kara | d5ed3a4 | 2010-08-19 14:13:33 -0700 | [diff] [blame] | 1448 | 		/* We check 'start' to handle wrapping when end == ~0UL */ | 
 | 1449 | 	} while (tagged >= WRITEBACK_TAG_BATCH && start); | 
| Jan Kara | f446daa | 2010-08-09 17:19:12 -0700 | [diff] [blame] | 1450 | } | 
 | 1451 | EXPORT_SYMBOL(tag_pages_for_writeback); | 
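 |  |  | 
 |  | /* | 
 |  |  * Sketch of the tag-then-write pattern (mirroring the write_cache_pages() | 
 |  |  * usage below; 'index' and 'end' are assumed to come from the caller's | 
 |  |  * writeback range): | 
 |  |  * | 
 |  |  *	tag_pages_for_writeback(mapping, index, end); | 
 |  |  *	while (pagevec_lookup_tag(&pvec, mapping, &index, | 
 |  |  *				  PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE)) | 
 |  |  *		... write each page, clearing its DIRTY (and thus its | 
 |  |  *		    TOWRITE) tag ... | 
 |  |  */ | 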
 | 1452 |  | 
 | 1453 | /** | 
| Miklos Szeredi | 0ea9718 | 2007-05-10 22:22:51 -0700 | [diff] [blame] | 1454 |  * write_cache_pages - walk the list of dirty pages of the given address space and write all of them. | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1455 |  * @mapping: address space structure to write | 
 | 1456 |  * @wbc: subtract the number of written pages from *@wbc->nr_to_write | 
| Miklos Szeredi | 0ea9718 | 2007-05-10 22:22:51 -0700 | [diff] [blame] | 1457 |  * @writepage: function called for each page | 
 | 1458 |  * @data: data passed to writepage function | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1459 |  * | 
| Miklos Szeredi | 0ea9718 | 2007-05-10 22:22:51 -0700 | [diff] [blame] | 1460 |  * If a page is already under I/O, write_cache_pages() skips it, even | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1461 |  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback, | 
 | 1462 |  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync() | 
 | 1463 |  * and msync() need to guarantee that all the data which was dirty at the time | 
 | 1464 |  * the call was made get new I/O started against them.  If wbc->sync_mode is | 
 | 1465 |  * WB_SYNC_ALL then we were called for data integrity and we must wait for | 
 | 1466 |  * existing IO to complete. | 
| Jan Kara | f446daa | 2010-08-09 17:19:12 -0700 | [diff] [blame] | 1467 |  * | 
 | 1468 |  * To avoid livelocks (when other process dirties new pages), we first tag | 
 | 1469 |  * pages which should be written back with TOWRITE tag and only then start | 
 | 1470 |  * writing them. For data-integrity sync we have to be careful so that we do | 
 | 1471 |  * not miss some pages (e.g., because some other process has cleared TOWRITE | 
 | 1472 |  * tag we set). The rule we follow is that TOWRITE tag can be cleared only | 
 | 1473 |  * by the process clearing the DIRTY tag (and submitting the page for IO). | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1474 |  */ | 
| Miklos Szeredi | 0ea9718 | 2007-05-10 22:22:51 -0700 | [diff] [blame] | 1475 | int write_cache_pages(struct address_space *mapping, | 
 | 1476 | 		      struct writeback_control *wbc, writepage_t writepage, | 
 | 1477 | 		      void *data) | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1478 | { | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1479 | 	int ret = 0; | 
 | 1480 | 	int done = 0; | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1481 | 	struct pagevec pvec; | 
 | 1482 | 	int nr_pages; | 
| Nick Piggin | 31a1266 | 2009-01-06 14:39:04 -0800 | [diff] [blame] | 1483 | 	pgoff_t uninitialized_var(writeback_index); | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1484 | 	pgoff_t index; | 
 | 1485 | 	pgoff_t end;		/* Inclusive */ | 
| Nick Piggin | bd19e01 | 2009-01-06 14:39:06 -0800 | [diff] [blame] | 1486 | 	pgoff_t done_index; | 
| Nick Piggin | 31a1266 | 2009-01-06 14:39:04 -0800 | [diff] [blame] | 1487 | 	int cycled; | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1488 | 	int range_whole = 0; | 
| Jan Kara | f446daa | 2010-08-09 17:19:12 -0700 | [diff] [blame] | 1489 | 	int tag; | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1490 |  | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1491 | 	pagevec_init(&pvec, 0); | 
 | 1492 | 	if (wbc->range_cyclic) { | 
| Nick Piggin | 31a1266 | 2009-01-06 14:39:04 -0800 | [diff] [blame] | 1493 | 		writeback_index = mapping->writeback_index; /* prev offset */ | 
 | 1494 | 		index = writeback_index; | 
 | 1495 | 		if (index == 0) | 
 | 1496 | 			cycled = 1; | 
 | 1497 | 		else | 
 | 1498 | 			cycled = 0; | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1499 | 		end = -1; | 
 | 1500 | 	} else { | 
 | 1501 | 		index = wbc->range_start >> PAGE_CACHE_SHIFT; | 
 | 1502 | 		end = wbc->range_end >> PAGE_CACHE_SHIFT; | 
 | 1503 | 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) | 
 | 1504 | 			range_whole = 1; | 
| Nick Piggin | 31a1266 | 2009-01-06 14:39:04 -0800 | [diff] [blame] | 1505 | 		cycled = 1; /* ignore range_cyclic tests */ | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1506 | 	} | 
| Wu Fengguang | 6e6938b | 2010-06-06 10:38:15 -0600 | [diff] [blame] | 1507 | 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) | 
| Jan Kara | f446daa | 2010-08-09 17:19:12 -0700 | [diff] [blame] | 1508 | 		tag = PAGECACHE_TAG_TOWRITE; | 
 | 1509 | 	else | 
 | 1510 | 		tag = PAGECACHE_TAG_DIRTY; | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1511 | retry: | 
| Wu Fengguang | 6e6938b | 2010-06-06 10:38:15 -0600 | [diff] [blame] | 1512 | 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) | 
| Jan Kara | f446daa | 2010-08-09 17:19:12 -0700 | [diff] [blame] | 1513 | 		tag_pages_for_writeback(mapping, index, end); | 
| Nick Piggin | bd19e01 | 2009-01-06 14:39:06 -0800 | [diff] [blame] | 1514 | 	done_index = index; | 
| Nick Piggin | 5a3d5c9 | 2009-01-06 14:39:09 -0800 | [diff] [blame] | 1515 | 	while (!done && (index <= end)) { | 
 | 1516 | 		int i; | 
 | 1517 |  | 
| Jan Kara | f446daa | 2010-08-09 17:19:12 -0700 | [diff] [blame] | 1518 | 		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, | 
| Nick Piggin | 5a3d5c9 | 2009-01-06 14:39:09 -0800 | [diff] [blame] | 1519 | 			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); | 
 | 1520 | 		if (nr_pages == 0) | 
 | 1521 | 			break; | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1522 |  | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1523 | 		for (i = 0; i < nr_pages; i++) { | 
 | 1524 | 			struct page *page = pvec.pages[i]; | 
 | 1525 |  | 
| Nick Piggin | d5482cd | 2009-01-06 14:39:11 -0800 | [diff] [blame] | 1526 | 			/* | 
 | 1527 | 			 * At this point, the page may be truncated or | 
 | 1528 | 			 * invalidated (changing page->mapping to NULL), or | 
 | 1529 | 			 * even swizzled back from swapper_space to tmpfs file | 
 | 1530 | 			 * mapping. However, page->index will not change | 
 | 1531 | 			 * because we have a reference on the page. | 
 | 1532 | 			 */ | 
 | 1533 | 			if (page->index > end) { | 
 | 1534 | 				/* | 
 | 1535 | 				 * can't be range_cyclic (1st pass) because | 
 | 1536 | 				 * end == -1 in that case. | 
 | 1537 | 				 */ | 
 | 1538 | 				done = 1; | 
 | 1539 | 				break; | 
 | 1540 | 			} | 
 | 1541 |  | 
| Jun'ichi Nomura | cf15b07 | 2011-03-22 16:33:40 -0700 | [diff] [blame] | 1542 | 			done_index = page->index; | 
| Nick Piggin | bd19e01 | 2009-01-06 14:39:06 -0800 | [diff] [blame] | 1543 |  | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1544 | 			lock_page(page); | 
 | 1545 |  | 
| Nick Piggin | 5a3d5c9 | 2009-01-06 14:39:09 -0800 | [diff] [blame] | 1546 | 			/* | 
 | 1547 | 			 * Page truncated or invalidated. We can freely skip it | 
 | 1548 | 			 * then, even for data integrity operations: the page | 
 | 1549 | 			 * has disappeared concurrently, so there could be no | 
 | 1550 | 			 * real expectation of this data integrity operation | 
 | 1551 | 			 * even if there is now a new, dirty page at the same | 
 | 1552 | 			 * pagecache address. | 
 | 1553 | 			 */ | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1554 | 			if (unlikely(page->mapping != mapping)) { | 
| Nick Piggin | 5a3d5c9 | 2009-01-06 14:39:09 -0800 | [diff] [blame] | 1555 | continue_unlock: | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1556 | 				unlock_page(page); | 
 | 1557 | 				continue; | 
 | 1558 | 			} | 
 | 1559 |  | 
| Nick Piggin | 515f4a0 | 2009-01-06 14:39:10 -0800 | [diff] [blame] | 1560 | 			if (!PageDirty(page)) { | 
 | 1561 | 				/* someone wrote it for us */ | 
 | 1562 | 				goto continue_unlock; | 
 | 1563 | 			} | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1564 |  | 
| Nick Piggin | 515f4a0 | 2009-01-06 14:39:10 -0800 | [diff] [blame] | 1565 | 			if (PageWriteback(page)) { | 
 | 1566 | 				if (wbc->sync_mode != WB_SYNC_NONE) | 
 | 1567 | 					wait_on_page_writeback(page); | 
 | 1568 | 				else | 
 | 1569 | 					goto continue_unlock; | 
 | 1570 | 			} | 
 | 1571 |  | 
 | 1572 | 			BUG_ON(PageWriteback(page)); | 
 | 1573 | 			if (!clear_page_dirty_for_io(page)) | 
| Nick Piggin | 5a3d5c9 | 2009-01-06 14:39:09 -0800 | [diff] [blame] | 1574 | 				goto continue_unlock; | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1575 |  | 
| Dave Chinner | 9e09438 | 2010-07-07 13:24:08 +1000 | [diff] [blame] | 1576 | 			trace_wbc_writepage(wbc, mapping->backing_dev_info); | 
| Miklos Szeredi | 0ea9718 | 2007-05-10 22:22:51 -0700 | [diff] [blame] | 1577 | 			ret = (*writepage)(page, wbc, data); | 
| Nick Piggin | 0026677 | 2009-01-06 14:39:06 -0800 | [diff] [blame] | 1578 | 			if (unlikely(ret)) { | 
 | 1579 | 				if (ret == AOP_WRITEPAGE_ACTIVATE) { | 
 | 1580 | 					unlock_page(page); | 
 | 1581 | 					ret = 0; | 
 | 1582 | 				} else { | 
 | 1583 | 					/* | 
 | 1584 | 					 * done_index is set past this page, | 
 | 1585 | 					 * so media errors will not choke | 
 | 1586 | 					 * background writeout for the entire | 
 | 1587 | 					 * file. This has consequences for | 
 | 1588 | 					 * range_cyclic semantics (ie. it may | 
 | 1589 | 					 * not be suitable for data integrity | 
 | 1590 | 					 * writeout). | 
 | 1591 | 					 */ | 
| Jun'ichi Nomura | cf15b07 | 2011-03-22 16:33:40 -0700 | [diff] [blame] | 1592 | 					done_index = page->index + 1; | 
| Nick Piggin | 0026677 | 2009-01-06 14:39:06 -0800 | [diff] [blame] | 1593 | 					done = 1; | 
 | 1594 | 					break; | 
 | 1595 | 				} | 
| Dave Chinner | 0b56492 | 2010-06-09 10:37:18 +1000 | [diff] [blame] | 1596 | 			} | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1597 |  | 
| Dave Chinner | 546a192 | 2010-08-24 11:44:34 +1000 | [diff] [blame] | 1598 | 			/* | 
 | 1599 | 			 * We stop writing back only if we are not doing | 
 | 1600 | 			 * integrity sync. In case of integrity sync we have to | 
 | 1601 | 			 * keep going until we have written all the pages | 
 | 1602 | 			 * we tagged for writeback prior to entering this loop. | 
 | 1603 | 			 */ | 
 | 1604 | 			if (--wbc->nr_to_write <= 0 && | 
 | 1605 | 			    wbc->sync_mode == WB_SYNC_NONE) { | 
 | 1606 | 				done = 1; | 
 | 1607 | 				break; | 
| Nick Piggin | 05fe478 | 2009-01-06 14:39:08 -0800 | [diff] [blame] | 1608 | 			} | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1609 | 		} | 
 | 1610 | 		pagevec_release(&pvec); | 
 | 1611 | 		cond_resched(); | 
 | 1612 | 	} | 
| Nick Piggin | 3a4c680 | 2009-02-12 04:34:23 +0100 | [diff] [blame] | 1613 | 	if (!cycled && !done) { | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1614 | 		/* | 
| Nick Piggin | 31a1266 | 2009-01-06 14:39:04 -0800 | [diff] [blame] | 1615 | 		 * range_cyclic: | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1616 | 		 * We hit the last page and there is more work to be done: wrap | 
 | 1617 | 		 * back to the start of the file | 
 | 1618 | 		 */ | 
| Nick Piggin | 31a1266 | 2009-01-06 14:39:04 -0800 | [diff] [blame] | 1619 | 		cycled = 1; | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1620 | 		index = 0; | 
| Nick Piggin | 31a1266 | 2009-01-06 14:39:04 -0800 | [diff] [blame] | 1621 | 		end = writeback_index - 1; | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1622 | 		goto retry; | 
 | 1623 | 	} | 
| Dave Chinner | 0b56492 | 2010-06-09 10:37:18 +1000 | [diff] [blame] | 1624 | 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) | 
 | 1625 | 		mapping->writeback_index = done_index; | 
| Aneesh Kumar K.V | 06d6cf69 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 1626 |  | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1627 | 	return ret; | 
 | 1628 | } | 
| Miklos Szeredi | 0ea9718 | 2007-05-10 22:22:51 -0700 | [diff] [blame] | 1629 | EXPORT_SYMBOL(write_cache_pages); | 
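 |  |  | 
 |  | /* | 
 |  |  * Sketch of a typical caller (hypothetical filesystem code, not from this | 
 |  |  * file): a ->writepages implementation threads its own context through the | 
 |  |  * 'data' pointer and lets write_cache_pages() handle the tagged walk, | 
 |  |  * livelock avoidance and range_cyclic bookkeeping. | 
 |  |  */ | 
 |  | struct example_wb_ctx { | 
 |  | 	int flags;			/* fs-private state */ | 
 |  | }; | 
 |  |  | 
 |  | static int example_fs_writepage(struct page *page, | 
 |  | 				struct writeback_control *wbc, void *data) | 
 |  | { | 
 |  | 	struct example_wb_ctx *ctx = data; | 
 |  |  | 
 |  | 	/* ... use ctx to map buffers and submit I/O for this page ... */ | 
 |  | 	return 0; | 
 |  | } | 
 |  |  | 
 |  | static int example_fs_writepages(struct address_space *mapping, | 
 |  | 				 struct writeback_control *wbc) | 
 |  | { | 
 |  | 	struct example_wb_ctx ctx = { .flags = 0 }; | 
 |  |  | 
 |  | 	return write_cache_pages(mapping, wbc, example_fs_writepage, &ctx); | 
 |  | } | 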
 | 1630 |  | 
 | 1631 | /* | 
 | 1632 |  * Function used by generic_writepages to call the real writepage | 
 | 1633 |  * function and set the mapping flags on error | 
 | 1634 |  */ | 
 | 1635 | static int __writepage(struct page *page, struct writeback_control *wbc, | 
 | 1636 | 		       void *data) | 
 | 1637 | { | 
 | 1638 | 	struct address_space *mapping = data; | 
 | 1639 | 	int ret = mapping->a_ops->writepage(page, wbc); | 
 | 1640 | 	mapping_set_error(mapping, ret); | 
 | 1641 | 	return ret; | 
 | 1642 | } | 
 | 1643 |  | 
 | 1644 | /** | 
 | 1645 |  * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them. | 
 | 1646 |  * @mapping: address space structure to write | 
 | 1647 |  * @wbc: subtract the number of written pages from *@wbc->nr_to_write | 
 | 1648 |  * | 
 | 1649 |  * This is a library function, which implements the writepages() | 
 | 1650 |  * address_space_operation. | 
 | 1651 |  */ | 
 | 1652 | int generic_writepages(struct address_space *mapping, | 
 | 1653 | 		       struct writeback_control *wbc) | 
 | 1654 | { | 
| Shaohua Li | 9b6096a | 2011-03-17 10:47:06 +0100 | [diff] [blame] | 1655 | 	struct blk_plug plug; | 
 | 1656 | 	int ret; | 
 | 1657 |  | 
| Miklos Szeredi | 0ea9718 | 2007-05-10 22:22:51 -0700 | [diff] [blame] | 1658 | 	/* deal with chardevs and other special file */ | 
 | 1659 | 	if (!mapping->a_ops->writepage) | 
 | 1660 | 		return 0; | 
 | 1661 |  | 
| Shaohua Li | 9b6096a | 2011-03-17 10:47:06 +0100 | [diff] [blame] | 1662 | 	blk_start_plug(&plug); | 
 | 1663 | 	ret = write_cache_pages(mapping, wbc, __writepage, mapping); | 
 | 1664 | 	blk_finish_plug(&plug); | 
 | 1665 | 	return ret; | 
| Miklos Szeredi | 0ea9718 | 2007-05-10 22:22:51 -0700 | [diff] [blame] | 1666 | } | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1667 |  | 
 | 1668 | EXPORT_SYMBOL(generic_writepages); | 
 | 1669 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1670 | int do_writepages(struct address_space *mapping, struct writeback_control *wbc) | 
 | 1671 | { | 
| Andrew Morton | 22905f7 | 2005-11-16 15:07:01 -0800 | [diff] [blame] | 1672 | 	int ret; | 
 | 1673 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1674 | 	if (wbc->nr_to_write <= 0) | 
 | 1675 | 		return 0; | 
 | 1676 | 	if (mapping->a_ops->writepages) | 
| Peter Zijlstra | d08b385 | 2006-09-25 23:30:57 -0700 | [diff] [blame] | 1677 | 		ret = mapping->a_ops->writepages(mapping, wbc); | 
| Andrew Morton | 22905f7 | 2005-11-16 15:07:01 -0800 | [diff] [blame] | 1678 | 	else | 
 | 1679 | 		ret = generic_writepages(mapping, wbc); | 
| Andrew Morton | 22905f7 | 2005-11-16 15:07:01 -0800 | [diff] [blame] | 1680 | 	return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1681 | } | 
 | 1682 |  | 
 | 1683 | /** | 
 | 1684 |  * write_one_page - write out a single page and optionally wait on I/O | 
| Martin Waitz | 67be2dd | 2005-05-01 08:59:26 -0700 | [diff] [blame] | 1685 |  * @page: the page to write | 
 | 1686 |  * @wait: if true, wait on writeout | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1687 |  * | 
 | 1688 |  * The page must be locked by the caller and will be unlocked upon return. | 
 | 1689 |  * | 
 | 1690 |  * write_one_page() returns a negative error code if I/O failed. | 
 | 1691 |  */ | 
 | 1692 | int write_one_page(struct page *page, int wait) | 
 | 1693 | { | 
 | 1694 | 	struct address_space *mapping = page->mapping; | 
 | 1695 | 	int ret = 0; | 
 | 1696 | 	struct writeback_control wbc = { | 
 | 1697 | 		.sync_mode = WB_SYNC_ALL, | 
 | 1698 | 		.nr_to_write = 1, | 
 | 1699 | 	}; | 
 | 1700 |  | 
 | 1701 | 	BUG_ON(!PageLocked(page)); | 
 | 1702 |  | 
 | 1703 | 	if (wait) | 
 | 1704 | 		wait_on_page_writeback(page); | 
 | 1705 |  | 
 | 1706 | 	if (clear_page_dirty_for_io(page)) { | 
 | 1707 | 		page_cache_get(page); | 
 | 1708 | 		ret = mapping->a_ops->writepage(page, &wbc); | 
 | 1709 | 		if (ret == 0 && wait) { | 
 | 1710 | 			wait_on_page_writeback(page); | 
 | 1711 | 			if (PageError(page)) | 
 | 1712 | 				ret = -EIO; | 
 | 1713 | 		} | 
 | 1714 | 		page_cache_release(page); | 
 | 1715 | 	} else { | 
 | 1716 | 		unlock_page(page); | 
 | 1717 | 	} | 
 | 1718 | 	return ret; | 
 | 1719 | } | 
 | 1720 | EXPORT_SYMBOL(write_one_page); | 
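 |  |  | 
 |  | /* | 
 |  |  * Sketch of the locking contract above (example_flush_page() is | 
 |  |  * hypothetical): the caller locks the page, and write_one_page() unlocks | 
 |  |  * it on every path before returning. | 
 |  |  */ | 
 |  | static int example_flush_page(struct page *page) | 
 |  | { | 
 |  | 	lock_page(page); | 
 |  | 	return write_one_page(page, 1);	/* wait for the I/O */ | 
 |  | } | 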
 | 1721 |  | 
 | 1722 | /* | 
| Ken Chen | 7671932 | 2007-02-10 01:43:15 -0800 | [diff] [blame] | 1723 |  * For address_spaces which neither use buffers nor write back. | 
 | 1724 |  */ | 
 | 1725 | int __set_page_dirty_no_writeback(struct page *page) | 
 | 1726 | { | 
 | 1727 | 	if (!PageDirty(page)) | 
| Bob Liu | c3f0da6 | 2011-01-13 15:45:49 -0800 | [diff] [blame] | 1728 | 		return !TestSetPageDirty(page); | 
| Ken Chen | 7671932 | 2007-02-10 01:43:15 -0800 | [diff] [blame] | 1729 | 	return 0; | 
 | 1730 | } | 
 | 1731 |  | 
 | 1732 | /* | 
| Edward Shishkin | e3a7cca | 2009-03-31 15:19:39 -0700 | [diff] [blame] | 1733 |  * Helper function for set_page_dirty family. | 
 | 1734 |  * NOTE: This relies on being atomic wrt interrupts. | 
 | 1735 |  */ | 
 | 1736 | void account_page_dirtied(struct page *page, struct address_space *mapping) | 
 | 1737 | { | 
 | 1738 | 	if (mapping_cap_account_dirty(mapping)) { | 
 | 1739 | 		__inc_zone_page_state(page, NR_FILE_DIRTY); | 
| Michael Rubin | ea941f0 | 2010-10-26 14:21:35 -0700 | [diff] [blame] | 1740 | 		__inc_zone_page_state(page, NR_DIRTIED); | 
| Edward Shishkin | e3a7cca | 2009-03-31 15:19:39 -0700 | [diff] [blame] | 1741 | 		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); | 
| Wu Fengguang | c8e28ce | 2011-01-23 10:07:47 -0600 | [diff] [blame] | 1742 | 		__inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED); | 
| Edward Shishkin | e3a7cca | 2009-03-31 15:19:39 -0700 | [diff] [blame] | 1743 | 		task_io_account_write(PAGE_CACHE_SIZE); | 
 | 1744 | 	} | 
 | 1745 | } | 
| Michael Rubin | 679ceac | 2010-08-20 02:31:26 -0700 | [diff] [blame] | 1746 | EXPORT_SYMBOL(account_page_dirtied); | 
| Edward Shishkin | e3a7cca | 2009-03-31 15:19:39 -0700 | [diff] [blame] | 1747 |  | 
 | 1748 | /* | 
| Michael Rubin | f629d1c | 2010-10-26 14:21:33 -0700 | [diff] [blame] | 1749 |  * Helper function for set_page_writeback family. | 
 | 1750 |  * NOTE: Unlike account_page_dirtied this does not rely on being atomic | 
 | 1751 |  * wrt interrupts. | 
 | 1752 |  */ | 
 | 1753 | void account_page_writeback(struct page *page) | 
 | 1754 | { | 
 | 1755 | 	inc_zone_page_state(page, NR_WRITEBACK); | 
 | 1756 | } | 
 | 1757 | EXPORT_SYMBOL(account_page_writeback); | 
 | 1758 |  | 
 | 1759 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1760 |  * For address_spaces which do not use buffers.  Just tag the page as dirty in | 
 | 1761 |  * its radix tree. | 
 | 1762 |  * | 
 | 1763 |  * This is also used when a single buffer is being dirtied: we want to set the | 
 | 1764 |  * page dirty in that case, but not all the buffers.  This is a "bottom-up" | 
 | 1765 |  * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying. | 
 | 1766 |  * | 
 | 1767 |  * Most callers have locked the page, which pins the address_space in memory. | 
 | 1768 |  * But zap_pte_range() does not lock the page, however in that case the | 
 | 1769 |  * mapping is pinned by the vma's ->vm_file reference. | 
 | 1770 |  * | 
 | 1771 |  * We take care to handle the case where the page was truncated from the | 
| Simon Arlott | 183ff22 | 2007-10-20 01:27:18 +0200 | [diff] [blame] | 1772 |  * mapping by re-checking page_mapping() inside tree_lock. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1773 |  */ | 
 | 1774 | int __set_page_dirty_nobuffers(struct page *page) | 
 | 1775 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1776 | 	if (!TestSetPageDirty(page)) { | 
 | 1777 | 		struct address_space *mapping = page_mapping(page); | 
 | 1778 | 		struct address_space *mapping2; | 
 | 1779 |  | 
| Andrew Morton | 8c08540 | 2006-12-10 02:19:24 -0800 | [diff] [blame] | 1780 | 		if (!mapping) | 
 | 1781 | 			return 1; | 
 | 1782 |  | 
| Nick Piggin | 19fd623 | 2008-07-25 19:45:32 -0700 | [diff] [blame] | 1783 | 		spin_lock_irq(&mapping->tree_lock); | 
| Andrew Morton | 8c08540 | 2006-12-10 02:19:24 -0800 | [diff] [blame] | 1784 | 		mapping2 = page_mapping(page); | 
 | 1785 | 		if (mapping2) { /* Race with truncate? */ | 
 | 1786 | 			BUG_ON(mapping2 != mapping); | 
| Nick Piggin | 787d221 | 2007-07-17 04:03:34 -0700 | [diff] [blame] | 1787 | 			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); | 
| Edward Shishkin | e3a7cca | 2009-03-31 15:19:39 -0700 | [diff] [blame] | 1788 | 			account_page_dirtied(page, mapping); | 
| Andrew Morton | 8c08540 | 2006-12-10 02:19:24 -0800 | [diff] [blame] | 1789 | 			radix_tree_tag_set(&mapping->page_tree, | 
 | 1790 | 				page_index(page), PAGECACHE_TAG_DIRTY); | 
 | 1791 | 		} | 
| Nick Piggin | 19fd623 | 2008-07-25 19:45:32 -0700 | [diff] [blame] | 1792 | 		spin_unlock_irq(&mapping->tree_lock); | 
| Andrew Morton | 8c08540 | 2006-12-10 02:19:24 -0800 | [diff] [blame] | 1793 | 		if (mapping->host) { | 
 | 1794 | 			/* !PageAnon && !swapper_space */ | 
 | 1795 | 			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1796 | 		} | 
| Andrew Morton | 4741c9f | 2006-03-24 03:18:11 -0800 | [diff] [blame] | 1797 | 		return 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1798 | 	} | 
| Andrew Morton | 4741c9f | 2006-03-24 03:18:11 -0800 | [diff] [blame] | 1799 | 	return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1800 | } | 
 | 1801 | EXPORT_SYMBOL(__set_page_dirty_nobuffers); | 
 | 1802 |  | 
 | 1803 | /* | 
 | 1804 |  * When a writepage implementation decides that it doesn't want to write this | 
 | 1805 |  * page for some reason, it should redirty the locked page via | 
 | 1806 |  * redirty_page_for_writepage() and it should then unlock the page and return 0 | 
 | 1807 |  */ | 
 | 1808 | int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page) | 
 | 1809 | { | 
 | 1810 | 	wbc->pages_skipped++; | 
 | 1811 | 	return __set_page_dirty_nobuffers(page); | 
 | 1812 | } | 
 | 1813 | EXPORT_SYMBOL(redirty_page_for_writepage); | 
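 |  |  | 
 |  | /* | 
 |  |  * Sketch of the convention above (example_backing_dev_busy() is a made-up | 
 |  |  * placeholder): a ->writepage that cannot make progress redirties the | 
 |  |  * page, unlocks it and returns 0 so writeback will retry it later. | 
 |  |  */ | 
 |  | static bool example_backing_dev_busy(void); | 
 |  |  | 
 |  | static int example_writepage_backoff(struct page *page, | 
 |  | 				     struct writeback_control *wbc) | 
 |  | { | 
 |  | 	if (example_backing_dev_busy()) { | 
 |  | 		redirty_page_for_writepage(wbc, page); | 
 |  | 		unlock_page(page); | 
 |  | 		return 0; | 
 |  | 	} | 
 |  | 	/* ... otherwise write the page out, unlocking it when done ... */ | 
 |  | 	return 0; | 
 |  | } | 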
 | 1814 |  | 
 | 1815 | /* | 
| Wu Fengguang | 6746aff | 2009-09-16 11:50:14 +0200 | [diff] [blame] | 1816 |  * Dirty a page. | 
 | 1817 |  * | 
 | 1818 |  * For pages with a mapping this should be done under the page lock | 
 | 1819 |  * for the benefit of asynchronous memory error handling, which prefers | 
 | 1820 |  * a consistent dirty state. This rule can be broken in some special | 
 | 1821 |  * cases, but it is better not to. | 
 | 1822 |  * | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1823 |  * If the mapping doesn't provide a set_page_dirty a_op, then | 
 | 1824 |  * just fall through and assume that it wants buffer_heads. | 
 | 1825 |  */ | 
| Nick Piggin | 1cf6e7d | 2009-02-18 14:48:18 -0800 | [diff] [blame] | 1826 | int set_page_dirty(struct page *page) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1827 | { | 
 | 1828 | 	struct address_space *mapping = page_mapping(page); | 
 | 1829 |  | 
 | 1830 | 	if (likely(mapping)) { | 
 | 1831 | 		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty; | 
| Minchan Kim | 278df9f | 2011-03-22 16:32:54 -0700 | [diff] [blame] | 1832 | 		/* | 
 | 1833 | 		 * readahead/lru_deactivate_page could leave PG_readahead or | 
 | 1834 | 		 * PG_reclaim set due to a race with end_page_writeback(). | 
 | 1835 | 		 * For readahead this is harmless: if the page gets written, | 
 | 1836 | 		 * the flags are reset anyway. | 
 | 1837 | 		 * For lru_deactivate_page it is also harmless if the page is | 
 | 1838 | 		 * redirtied, since the flag is then reset; but if the page is | 
 | 1839 | 		 * used by readahead, it confuses readahead into restarting | 
 | 1840 | 		 * the size rampup, which is a trivial problem. | 
 | 1841 | 		 */ | 
 | 1842 | 		ClearPageReclaim(page); | 
| David Howells | 9361401 | 2006-09-30 20:45:40 +0200 | [diff] [blame] | 1843 | #ifdef CONFIG_BLOCK | 
 | 1844 | 		if (!spd) | 
 | 1845 | 			spd = __set_page_dirty_buffers; | 
 | 1846 | #endif | 
 | 1847 | 		return (*spd)(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1848 | 	} | 
| Andrew Morton | 4741c9f | 2006-03-24 03:18:11 -0800 | [diff] [blame] | 1849 | 	if (!PageDirty(page)) { | 
 | 1850 | 		if (!TestSetPageDirty(page)) | 
 | 1851 | 			return 1; | 
 | 1852 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1853 | 	return 0; | 
 | 1854 | } | 
 | 1855 | EXPORT_SYMBOL(set_page_dirty); | 
 | 1856 |  | 
 | 1857 | /* | 
 | 1858 |  * set_page_dirty() is racy if the caller has no reference against | 
 | 1859 |  * page->mapping->host, and if the page is unlocked.  This is because another | 
 | 1860 |  * CPU could truncate the page off the mapping and then free the mapping. | 
 | 1861 |  * | 
 | 1862 |  * Usually, the page _is_ locked, or the caller is a user-space process which | 
 | 1863 |  * holds a reference on the inode by having an open file. | 
 | 1864 |  * | 
 | 1865 |  * In other cases, the page should be locked before running set_page_dirty(). | 
 | 1866 |  */ | 
 | 1867 | int set_page_dirty_lock(struct page *page) | 
 | 1868 | { | 
 | 1869 | 	int ret; | 
 | 1870 |  | 
| Jens Axboe | 7eaceac | 2011-03-10 08:52:07 +0100 | [diff] [blame] | 1871 | 	lock_page(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1872 | 	ret = set_page_dirty(page); | 
 | 1873 | 	unlock_page(page); | 
 | 1874 | 	return ret; | 
 | 1875 | } | 
 | 1876 | EXPORT_SYMBOL(set_page_dirty_lock); | 
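/*
 * Illustrative sketch, not part of mm/page-writeback.c: the classic
 * set_page_dirty_lock() caller is a driver completing a device-to-memory
 * transfer into user pages pinned with get_user_pages().  At completion
 * time the pages are not locked, so the locking variant must be used
 * before the get_user_pages() reference is dropped.
 */
static void example_complete_dma_read(struct page **pages, int nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		set_page_dirty_lock(pages[i]);	/* device wrote into the page */
		page_cache_release(pages[i]);	/* drop get_user_pages() ref */
	}
}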
 | 1877 |  | 
 | 1878 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1879 |  * Clear a page's dirty flag, while caring for dirty memory accounting. | 
 | 1880 |  * Returns true if the page was previously dirty. | 
 | 1881 |  * | 
 | 1882 |  * This is for preparing to put the page under writeout.  We leave the page | 
 | 1883 |  * tagged as dirty in the radix tree so that a concurrent write-for-sync | 
 | 1884 |  * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage | 
 | 1885 |  * implementation will run either set_page_writeback() or set_page_dirty(), | 
 | 1886 |  * at which stage we bring the page's dirty flag and radix-tree dirty tag | 
 | 1887 |  * back into sync. | 
 | 1888 |  * | 
 | 1889 |  * This incoherency between the page's dirty flag and radix-tree tag is | 
 | 1890 |  * unfortunate, but it only exists while the page is locked. | 
 | 1891 |  */ | 
 | 1892 | int clear_page_dirty_for_io(struct page *page) | 
 | 1893 | { | 
 | 1894 | 	struct address_space *mapping = page_mapping(page); | 
 | 1895 |  | 
| Nick Piggin | 7935289 | 2007-07-19 01:47:22 -0700 | [diff] [blame] | 1896 | 	BUG_ON(!PageLocked(page)); | 
 | 1897 |  | 
| Linus Torvalds | 7658cc2 | 2006-12-29 10:00:58 -0800 | [diff] [blame] | 1898 | 	if (mapping && mapping_cap_account_dirty(mapping)) { | 
 | 1899 | 		/* | 
 | 1900 | 		 * Yes, Virginia, this is indeed insane. | 
 | 1901 | 		 * | 
 | 1902 | 		 * We use this sequence to make sure that | 
 | 1903 | 		 *  (a) we account for dirty stats properly | 
 | 1904 | 		 *  (b) we tell the low-level filesystem to | 
 | 1905 | 		 *      mark the whole page dirty if it was | 
 | 1906 | 		 *      dirty in a pagetable. Only to then | 
 | 1907 | 		 *  (c) clean the page again and return 1 to | 
 | 1908 | 		 *      cause the writeback. | 
 | 1909 | 		 * | 
 | 1910 | 		 * This way we avoid all nasty races with the | 
 | 1911 | 		 * dirty bit in multiple places and clearing | 
 | 1912 | 		 * them concurrently from different threads. | 
 | 1913 | 		 * | 
 | 1914 | 		 * Note! Normally the "set_page_dirty(page)" | 
 | 1915 | 		 * has no effect on the actual dirty bit - since | 
 | 1916 | 		 * that will already usually be set. But we | 
 | 1917 | 		 * need the side effects, and it can help us | 
 | 1918 | 		 * avoid races. | 
 | 1919 | 		 * | 
 | 1920 | 		 * We basically use the page "master dirty bit" | 
 | 1921 | 		 * as a serialization point for all the different | 
 | 1922 | 		 * threads doing their things. | 
| Linus Torvalds | 7658cc2 | 2006-12-29 10:00:58 -0800 | [diff] [blame] | 1923 | 		 */ | 
 | 1924 | 		if (page_mkclean(page)) | 
 | 1925 | 			set_page_dirty(page); | 
| Nick Piggin | 7935289 | 2007-07-19 01:47:22 -0700 | [diff] [blame] | 1926 | 		/* | 
 | 1927 | 		 * We carefully synchronise fault handlers against | 
 | 1928 | 		 * installing a dirty pte and marking the page dirty | 
 | 1929 | 		 * at this point. We do this by having them hold the | 
 | 1930 | 		 * page lock at some point after installing their | 
 | 1931 | 		 * pte, but before marking the page dirty. | 
 | 1932 | 		 * Pages are always locked coming in here, so we get | 
 | 1933 | 		 * the desired exclusion. See mm/memory.c:do_wp_page() | 
 | 1934 | 		 * for more comments. | 
 | 1935 | 		 */ | 
| Linus Torvalds | 7658cc2 | 2006-12-29 10:00:58 -0800 | [diff] [blame] | 1936 | 		if (TestClearPageDirty(page)) { | 
| Andrew Morton | 8c08540 | 2006-12-10 02:19:24 -0800 | [diff] [blame] | 1937 | 			dec_zone_page_state(page, NR_FILE_DIRTY); | 
| Peter Zijlstra | c9e51e4 | 2007-10-16 23:25:47 -0700 | [diff] [blame] | 1938 | 			dec_bdi_stat(mapping->backing_dev_info, | 
 | 1939 | 					BDI_RECLAIMABLE); | 
| Linus Torvalds | 7658cc2 | 2006-12-29 10:00:58 -0800 | [diff] [blame] | 1940 | 			return 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1941 | 		} | 
| Linus Torvalds | 7658cc2 | 2006-12-29 10:00:58 -0800 | [diff] [blame] | 1942 | 		return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1943 | 	} | 
| Linus Torvalds | 7658cc2 | 2006-12-29 10:00:58 -0800 | [diff] [blame] | 1944 | 	return TestClearPageDirty(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1945 | } | 
| Hans Reiser | 58bb01a | 2005-11-18 01:10:53 -0800 | [diff] [blame] | 1946 | EXPORT_SYMBOL(clear_page_dirty_for_io); | 
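/*
 * Illustrative sketch, not part of mm/page-writeback.c: the caller-side
 * sequence described in the comment above, modelled on what writeback
 * does per page (compare write_one_page() and write_cache_pages()
 * earlier in this file).  The page's dirty flag is transferred to
 * "about to be written" state while the radix-tree dirty tag stays set
 * until ->writepage() runs set_page_writeback() or set_page_dirty().
 */
static int example_writeout_page(struct page *page,
				 struct writeback_control *wbc)
{
	struct address_space *mapping = page->mapping;

	lock_page(page);
	if (!clear_page_dirty_for_io(page)) {
		/* Someone else cleaned the page meanwhile; nothing to do. */
		unlock_page(page);
		return 0;
	}
	/* ->writepage() is expected to unlock the page. */
	return mapping->a_ops->writepage(page, wbc);
}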
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1947 |  | 
 | 1948 | int test_clear_page_writeback(struct page *page) | 
 | 1949 | { | 
 | 1950 | 	struct address_space *mapping = page_mapping(page); | 
 | 1951 | 	int ret; | 
 | 1952 |  | 
 | 1953 | 	if (mapping) { | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1954 | 		struct backing_dev_info *bdi = mapping->backing_dev_info; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1955 | 		unsigned long flags; | 
 | 1956 |  | 
| Nick Piggin | 19fd623 | 2008-07-25 19:45:32 -0700 | [diff] [blame] | 1957 | 		spin_lock_irqsave(&mapping->tree_lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1958 | 		ret = TestClearPageWriteback(page); | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1959 | 		if (ret) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1960 | 			radix_tree_tag_clear(&mapping->page_tree, | 
 | 1961 | 						page_index(page), | 
 | 1962 | 						PAGECACHE_TAG_WRITEBACK); | 
| Miklos Szeredi | e4ad08f | 2008-04-30 00:54:37 -0700 | [diff] [blame] | 1963 | 			if (bdi_cap_account_writeback(bdi)) { | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1964 | 				__dec_bdi_stat(bdi, BDI_WRITEBACK); | 
| Peter Zijlstra | 04fbfdc | 2007-10-16 23:25:50 -0700 | [diff] [blame] | 1965 | 				__bdi_writeout_inc(bdi); | 
 | 1966 | 			} | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1967 | 		} | 
| Nick Piggin | 19fd623 | 2008-07-25 19:45:32 -0700 | [diff] [blame] | 1968 | 		spin_unlock_irqrestore(&mapping->tree_lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1969 | 	} else { | 
 | 1970 | 		ret = TestClearPageWriteback(page); | 
 | 1971 | 	} | 
| Wu Fengguang | 99b12e3 | 2011-07-25 17:12:37 -0700 | [diff] [blame] | 1972 | 	if (ret) { | 
| Andrew Morton | d688abf | 2007-07-19 01:49:17 -0700 | [diff] [blame] | 1973 | 		dec_zone_page_state(page, NR_WRITEBACK); | 
| Wu Fengguang | 99b12e3 | 2011-07-25 17:12:37 -0700 | [diff] [blame] | 1974 | 		inc_zone_page_state(page, NR_WRITTEN); | 
 | 1975 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1976 | 	return ret; | 
 | 1977 | } | 
 | 1978 |  | 
 | 1979 | int test_set_page_writeback(struct page *page) | 
 | 1980 | { | 
 | 1981 | 	struct address_space *mapping = page_mapping(page); | 
 | 1982 | 	int ret; | 
 | 1983 |  | 
 | 1984 | 	if (mapping) { | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1985 | 		struct backing_dev_info *bdi = mapping->backing_dev_info; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1986 | 		unsigned long flags; | 
 | 1987 |  | 
| Nick Piggin | 19fd623 | 2008-07-25 19:45:32 -0700 | [diff] [blame] | 1988 | 		spin_lock_irqsave(&mapping->tree_lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1989 | 		ret = TestSetPageWriteback(page); | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1990 | 		if (!ret) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1991 | 			radix_tree_tag_set(&mapping->page_tree, | 
 | 1992 | 						page_index(page), | 
 | 1993 | 						PAGECACHE_TAG_WRITEBACK); | 
| Miklos Szeredi | e4ad08f | 2008-04-30 00:54:37 -0700 | [diff] [blame] | 1994 | 			if (bdi_cap_account_writeback(bdi)) | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1995 | 				__inc_bdi_stat(bdi, BDI_WRITEBACK); | 
 | 1996 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1997 | 		if (!PageDirty(page)) | 
 | 1998 | 			radix_tree_tag_clear(&mapping->page_tree, | 
 | 1999 | 						page_index(page), | 
 | 2000 | 						PAGECACHE_TAG_DIRTY); | 
| Jan Kara | f446daa | 2010-08-09 17:19:12 -0700 | [diff] [blame] | 2001 | 		radix_tree_tag_clear(&mapping->page_tree, | 
 | 2002 | 				     page_index(page), | 
 | 2003 | 				     PAGECACHE_TAG_TOWRITE); | 
| Nick Piggin | 19fd623 | 2008-07-25 19:45:32 -0700 | [diff] [blame] | 2004 | 		spin_unlock_irqrestore(&mapping->tree_lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2005 | 	} else { | 
 | 2006 | 		ret = TestSetPageWriteback(page); | 
 | 2007 | 	} | 
| Andrew Morton | d688abf | 2007-07-19 01:49:17 -0700 | [diff] [blame] | 2008 | 	if (!ret) | 
| Michael Rubin | f629d1c | 2010-10-26 14:21:33 -0700 | [diff] [blame] | 2009 | 		account_page_writeback(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2010 | 	return ret; | 
 | 2012 | } | 
 | 2013 | EXPORT_SYMBOL(test_set_page_writeback); | 
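/*
 * Illustrative sketch, not part of mm/page-writeback.c: how the two
 * helpers above pair up across a page's writeout.  set_page_writeback()
 * is a wrapper around test_set_page_writeback(), and end_page_writeback()
 * calls test_clear_page_writeback() from I/O completion.  The
 * example_do_io() helper is an assumption standing in for a synchronous
 * block transfer.
 */
static int example_writepage_sync(struct page *page,
				  struct writeback_control *wbc)
{
	int err;

	set_page_writeback(page);	/* PG_writeback + radix-tree tag set */
	unlock_page(page);
	err = example_do_io(page);	/* hypothetical synchronous write */
	end_page_writeback(page);	/* clears PG_writeback, wakes waiters */
	return err;
}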
 | 2014 |  | 
 | 2015 | /* | 
| Nick Piggin | 0012818 | 2007-10-16 01:24:40 -0700 | [diff] [blame] | 2016 |  * Return true if any of the pages in the mapping are marked with the | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2017 |  * passed tag. | 
 | 2018 |  */ | 
 | 2019 | int mapping_tagged(struct address_space *mapping, int tag) | 
 | 2020 | { | 
| Konstantin Khlebnikov | 72c4783 | 2011-07-25 17:12:31 -0700 | [diff] [blame] | 2021 | 	return radix_tree_tagged(&mapping->page_tree, tag); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2022 | } | 
 | 2023 | EXPORT_SYMBOL(mapping_tagged); |
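/*
 * Illustrative sketch, not part of mm/page-writeback.c: mapping_tagged()
 * is typically used as a cheap "anything to do?" check before walking
 * the pagecache, e.g. deciding whether an inode still has pages that
 * need flushing or waiting on.
 */
static int example_inode_has_pending_io(struct address_space *mapping)
{
	return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) ||
	       mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
}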