/*
 * mm/page-writeback.c.
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	akpm@zip.com.au
 *		Initial version
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>

/*
 * The maximum number of pages to writeout in a single bdflush/kupdate
 * operation.  We do this so we don't hold I_LOCK against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty limits each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES	1024

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

static long total_pages;	/* The total number of pages in the machine. */
static int dirty_exceeded;	/* Dirty mem may be over limit */

/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than RATELIMIT_PAGES to ensure that reasonably
 * large amounts of I/O are submitted.
 */
static inline long sync_writeback_pages(void)
{
	return ratelimit_pages + ratelimit_pages / 2;
}

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via pdflush) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 40;

/*
 * The interval between `kupdate'-style writebacks, in centiseconds
 * (hundredths of a second)
 */
int dirty_writeback_centisecs = 5 * 100;

/*
 * The longest number of centiseconds for which data is allowed to remain dirty
 */
int dirty_expire_centisecs = 30 * 100;

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode".
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */


static void background_writeout(unsigned long _min_pages);

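/*
 * A snapshot of the global page-state counters which the writeback code
 * bases its decisions on.
 */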
struct writeback_state
{
	unsigned long nr_dirty;
	unsigned long nr_unstable;
	unsigned long nr_mapped;
	unsigned long nr_writeback;
};

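/*
 * Read the current dirty, unstable, mapped and writeback page counts into
 * *wbs.
 */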
static void get_writeback_state(struct writeback_state *wbs)
{
	wbs->nr_dirty = read_page_state(nr_dirty);
	wbs->nr_unstable = read_page_state(nr_unstable);
	wbs->nr_mapped = read_page_state(nr_mapped);
	wbs->nr_writeback = read_page_state(nr_writeback);
}

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around, to avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */
static void
get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
		struct address_space *mapping)
{
	int background_ratio;		/* Percentages */
	int dirty_ratio;
	int unmapped_ratio;
	long background;
	long dirty;
	unsigned long available_memory = total_pages;
	struct task_struct *tsk;

	get_writeback_state(wbs);

#ifdef CONFIG_HIGHMEM
	/*
	 * If this mapping can only allocate from low memory,
	 * we exclude high memory from our count.
	 */
	if (mapping && !(mapping_gfp_mask(mapping) & __GFP_HIGHMEM))
		available_memory -= totalhigh_pages;
#endif

	unmapped_ratio = 100 - (wbs->nr_mapped * 100) / total_pages;

	dirty_ratio = vm_dirty_ratio;
	if (dirty_ratio > unmapped_ratio / 2)
		dirty_ratio = unmapped_ratio / 2;

	if (dirty_ratio < 5)
		dirty_ratio = 5;

	background_ratio = dirty_background_ratio;
	if (background_ratio >= dirty_ratio)
		background_ratio = dirty_ratio / 2;

	background = (background_ratio * available_memory) / 100;
	dirty = (dirty_ratio * available_memory) / 100;
	tsk = current;
	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
		background += background / 4;
		dirty += dirty / 4;
	}
	*pbackground = background;
	*pdirty = dirty;
}

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then pdflush is woken to perform some
 * writeout.
 */
static void balance_dirty_pages(struct address_space *mapping)
{
	struct writeback_state wbs;
	long nr_reclaimable;
	long background_thresh;
	long dirty_thresh;
	unsigned long pages_written = 0;
	unsigned long write_chunk = sync_writeback_pages();

	struct backing_dev_info *bdi = mapping->backing_dev_info;

	for (;;) {
		struct writeback_control wbc = {
			.bdi		= bdi,
			.sync_mode	= WB_SYNC_NONE,
			.older_than_this = NULL,
			.nr_to_write	= write_chunk,
		};

		get_dirty_limits(&wbs, &background_thresh,
					&dirty_thresh, mapping);
		nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
		if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
			break;

		dirty_exceeded = 1;

		/* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
		 * Unstable writes are a feature of certain networked
		 * filesystems (e.g. NFS) in which data may have been
		 * written to the server's write cache, but has not yet
		 * been flushed to permanent storage.
		 */
		if (nr_reclaimable) {
			writeback_inodes(&wbc);
			get_dirty_limits(&wbs, &background_thresh,
					&dirty_thresh, mapping);
			nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
			if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
				break;
			pages_written += write_chunk - wbc.nr_to_write;
			if (pages_written >= write_chunk)
				break;		/* We've done our duty */
		}
		blk_congestion_wait(WRITE, HZ/10);
	}

	if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
		dirty_exceeded = 0;

	if (writeback_in_progress(bdi))
		return;		/* pdflush is already working this queue */

	/*
	 * In laptop mode, we wait until hitting the higher threshold before
	 * starting background writeout, and then write out all the way down
	 * to the lower threshold.  So slow writers cause minimal disk activity.
	 *
	 * In normal mode, we start background writeout at the lower
	 * background_thresh, to keep the amount of dirty memory low.
	 */
	if ((laptop_mode && pages_written) ||
	     (!laptop_mode && (nr_reclaimable > background_thresh)))
		pdflush_operation(background_writeout, 0);
}

/**
 * balance_dirty_pages_ratelimited - balance dirty memory state
 * @mapping: address_space which was dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited(struct address_space *mapping)
{
	static DEFINE_PER_CPU(int, ratelimits) = 0;
	long ratelimit;

	ratelimit = ratelimit_pages;
	if (dirty_exceeded)
		ratelimit = 8;

	/*
	 * Check the rate limiting. Also, we do not want to throttle real-time
	 * tasks in balance_dirty_pages(). Period.
	 */
	if (get_cpu_var(ratelimits)++ >= ratelimit) {
		__get_cpu_var(ratelimits) = 0;
		put_cpu_var(ratelimits);
		balance_dirty_pages(mapping);
		return;
	}
	put_cpu_var(ratelimits);
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited);

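/*
 * Throttle the caller (typically the page allocator or page reclaim) while
 * the number of unstable and writeback pages exceeds the dirty threshold,
 * boosted by 10%, waiting for write congestion to ease before re-checking.
 */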
void throttle_vm_writeout(void)
{
	struct writeback_state wbs;
	long background_thresh;
	long dirty_thresh;

	for ( ; ; ) {
		get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);

		/*
		 * Boost the allowable dirty threshold a bit for page
		 * allocators so they don't get DoS'ed by heavy writers
		 */
		dirty_thresh += dirty_thresh / 10;	/* wheeee... */

		if (wbs.nr_unstable + wbs.nr_writeback <= dirty_thresh)
			break;
		blk_congestion_wait(WRITE, HZ/10);
	}
}


/*
 * Write back at least _min_pages pages, and keep writing until the amount of
 * dirty memory is less than the background threshold, or until we're all
 * clean.
 */
static void background_writeout(unsigned long _min_pages)
{
	long min_pages = _min_pages;
	struct writeback_control wbc = {
		.bdi		= NULL,
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = NULL,
		.nr_to_write	= 0,
		.nonblocking	= 1,
	};

	for ( ; ; ) {
		struct writeback_state wbs;
		long background_thresh;
		long dirty_thresh;

		get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);
		if (wbs.nr_dirty + wbs.nr_unstable < background_thresh
				&& min_pages <= 0)
			break;
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;
		writeback_inodes(&wbc);
		min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
			/* Wrote less than expected */
			blk_congestion_wait(WRITE, HZ/10);
			if (!wbc.encountered_congestion)
				break;
		}
	}
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.  Returns 0 if a pdflush thread was dispatched.  Returns
 * -1 if all pdflush threads were busy.
 */
int wakeup_pdflush(long nr_pages)
{
	if (nr_pages == 0) {
		struct writeback_state wbs;

		get_writeback_state(&wbs);
		nr_pages = wbs.nr_dirty + wbs.nr_unstable;
	}
	return pdflush_operation(background_writeout, nr_pages);
}

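/*
 * Timers which kick off the periodic `kupdate'-style writeback and the
 * laptop-mode flush.
 */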
static void wb_timer_fn(unsigned long unused);
static void laptop_timer_fn(unsigned long unused);

static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);

/*
 * Periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_centisecs.  But if a writeback event
 * takes longer than a dirty_writeback_centisecs interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static void wb_kupdate(unsigned long arg)
{
	unsigned long oldest_jif;
	unsigned long start_jif;
	unsigned long next_jif;
	long nr_to_write;
	struct writeback_state wbs;
	struct writeback_control wbc = {
		.bdi		= NULL,
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = &oldest_jif,
		.nr_to_write	= 0,
		.nonblocking	= 1,
		.for_kupdate	= 1,
	};

	sync_supers();

	get_writeback_state(&wbs);
	oldest_jif = jiffies - (dirty_expire_centisecs * HZ) / 100;
	start_jif = jiffies;
	next_jif = start_jif + (dirty_writeback_centisecs * HZ) / 100;
	nr_to_write = wbs.nr_dirty + wbs.nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
	while (nr_to_write > 0) {
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		writeback_inodes(&wbc);
		if (wbc.nr_to_write > 0) {
			if (wbc.encountered_congestion)
				blk_congestion_wait(WRITE, HZ/10);
			else
				break;	/* All the old data is written */
		}
		nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
	}
	if (time_before(next_jif, jiffies + HZ))
		next_jif = jiffies + HZ;
	if (dirty_writeback_centisecs)
		mod_timer(&wb_timer, next_jif);
}

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
		struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (dirty_writeback_centisecs) {
		mod_timer(&wb_timer,
			jiffies + (dirty_writeback_centisecs * HZ) / 100);
	} else {
		del_timer(&wb_timer);
	}
	return 0;
}

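/*
 * The periodic writeback timer has fired: hand wb_kupdate off to a pdflush
 * thread.  If all pdflush threads are busy, try again in one second.
 */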
static void wb_timer_fn(unsigned long unused)
{
	if (pdflush_operation(wb_kupdate, 0) < 0)
		mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
}

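/*
 * When the laptop-mode flush timer fires, sync everything via a pdflush
 * thread, so the disk can spin down again afterwards.
 */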
static void laptop_flush(unsigned long unused)
{
	sys_sync();
}

static void laptop_timer_fn(unsigned long unused)
{
	pdflush_operation(laptop_flush, 0);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(void)
{
	mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode * HZ);
}

/*
 * We're in laptop mode and we've just synced. The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
	del_timer(&laptop_mode_wb_timer);
}

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high, because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write back.
 * If this is too large then the caller will block on the IO queue all the
 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 * will write six megabyte chunks, max.
 */

static void set_ratelimit(void)
{
	ratelimit_pages = total_pages / (num_online_cpus() * 32);
	if (ratelimit_pages < 16)
		ratelimit_pages = 16;
	if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
		ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}

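/*
 * CPU hotplug notifier: recompute ratelimit_pages whenever the set of online
 * CPUs changes.
 */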
static int
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
	set_ratelimit();
	return 0;
}

static struct notifier_block ratelimit_nb = {
	.notifier_call	= ratelimit_handler,
	.next		= NULL,
};

/*
 * If the machine has a large highmem:lowmem ratio then scale back the default
 * dirty memory thresholds: allowing too much dirty highmem pins an excessive
 * number of buffer_heads.
 */
void __init page_writeback_init(void)
{
	long buffer_pages = nr_free_buffer_pages();
	long correction;

	total_pages = nr_free_pagecache_pages();

	correction = (100 * 4 * buffer_pages) / total_pages;

	if (correction < 100) {
		dirty_background_ratio *= correction;
		dirty_background_ratio /= 100;
		vm_dirty_ratio *= correction;
		vm_dirty_ratio /= 100;

		if (dirty_background_ratio <= 0)
			dirty_background_ratio = 1;
		if (vm_dirty_ratio <= 0)
			vm_dirty_ratio = 1;
	}
	mod_timer(&wb_timer, jiffies + (dirty_writeback_centisecs * HZ) / 100);
	set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);
}

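/*
 * Write out some of a mapping's dirty pages: use the address_space's own
 * ->writepages() method if it provides one, otherwise fall back to
 * generic_writepages().
 */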
int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	if (wbc->nr_to_write <= 0)
		return 0;
	if (mapping->a_ops->writepages)
		return mapping->a_ops->writepages(mapping, wbc);
	return generic_writepages(mapping, wbc);
}

/**
 * write_one_page - write out a single page and optionally wait on I/O
 *
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
	struct address_space *mapping = page->mapping;
	int ret = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	BUG_ON(!PageLocked(page));

	if (wait)
		wait_on_page_writeback(page);

	if (clear_page_dirty_for_io(page)) {
		page_cache_get(page);
		ret = mapping->a_ops->writepage(page, &wbc);
		if (ret == 0 && wait) {
			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		page_cache_release(page);
	} else {
		unlock_page(page);
	}
	return ret;
}
EXPORT_SYMBOL(write_one_page);

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page; in that case the mapping is
 * pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	int ret = 0;

	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		struct address_space *mapping2;

		if (mapping) {
			write_lock_irq(&mapping->tree_lock);
			mapping2 = page_mapping(page);
			if (mapping2) { /* Race with truncate? */
				BUG_ON(mapping2 != mapping);
				if (mapping_cap_account_dirty(mapping))
					inc_page_state(nr_dirty);
				radix_tree_tag_set(&mapping->page_tree,
					page_index(page), PAGECACHE_TAG_DIRTY);
			}
			write_unlock_irq(&mapping->tree_lock);
			if (mapping->host) {
				/* !PageAnon && !swapper_space */
				__mark_inode_dirty(mapping->host,
							I_DIRTY_PAGES);
			}
		}
	}
	return ret;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
	wbc->pages_skipped++;
	return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);

/*
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
int fastcall set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (likely(mapping)) {
		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
		if (spd)
			return (*spd)(page);
		return __set_page_dirty_buffers(page);
	}
	if (!PageDirty(page))
		SetPageDirty(page);
	return 0;
}
EXPORT_SYMBOL(set_page_dirty);

/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
	int ret;

	lock_page(page);
	ret = set_page_dirty(page);
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 */
int test_clear_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;

	if (mapping) {
		write_lock_irqsave(&mapping->tree_lock, flags);
		if (TestClearPageDirty(page)) {
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
			write_unlock_irqrestore(&mapping->tree_lock, flags);
			if (mapping_cap_account_dirty(mapping))
				dec_page_state(nr_dirty);
			return 1;
		}
		write_unlock_irqrestore(&mapping->tree_lock, flags);
		return 0;
	}
	return TestClearPageDirty(page);
}
EXPORT_SYMBOL(test_clear_page_dirty);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (mapping) {
		if (TestClearPageDirty(page)) {
			if (mapping_cap_account_dirty(mapping))
				dec_page_state(nr_dirty);
			return 1;
		}
		return 0;
	}
	return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

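/*
 * Clear a page's writeback flag and, for pagecache pages, the corresponding
 * radix-tree writeback tag, under the mapping's tree_lock.  Returns the old
 * value of the writeback flag.
 */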
int test_clear_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		unsigned long flags;

		write_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestClearPageWriteback(page);
		if (ret)
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
		write_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestClearPageWriteback(page);
	}
	return ret;
}

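/*
 * Mark a page as under writeback: set the page flag and the radix-tree
 * writeback tag, and clear the radix-tree dirty tag if the page is no longer
 * dirty.  Returns the old value of the writeback flag.
 */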
int test_set_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		unsigned long flags;

		write_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestSetPageWriteback(page);
		if (!ret)
			radix_tree_tag_set(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
		if (!PageDirty(page))
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		write_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestSetPageWriteback(page);
	}
	return ret;
}
EXPORT_SYMBOL(test_set_page_writeback);

/*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
	unsigned long flags;
	int ret;

	read_lock_irqsave(&mapping->tree_lock, flags);
	ret = radix_tree_tagged(&mapping->page_tree, tag);
	read_unlock_irqrestore(&mapping->tree_lock, flags);
	return ret;
}
EXPORT_SYMBOL(mapping_tagged);