/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/aio.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include "internal.h"

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_mutex		(truncate_pagecache)
 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *
 *  ->i_mutex
 *    ->i_mmap_mutex		(truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_mutex
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page		(access_process_vm)
 *
 *  ->i_mutex			(generic_file_buffered_write)
 *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock			(fs/fs-writeback.c)
 *    ->mapping->tree_lock	(__sync_single_inode)
 *
 *  ->i_mmap_mutex
 *    ->anon_vma.lock		(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->tree_lock		(try_to_unmap_one)
 *    ->zone.lru_lock		(follow_page->mark_page_accessed)
 *    ->zone.lru_lock		(check_pte_range->isolate_lru_page)
 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 *
 *  (code doesn't rely on that order, so you could switch it around)
 *  ->tasklist_lock             (memory_failure, collect_procs_ao)
 *    ->i_mmap_mutex
 */

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the mapping's tree_lock.
 */
void __delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;

	/*
	 * If we're uptodate, flush out into the cleancache, otherwise
	 * invalidate any existing cleancache entries.  We can't leave
	 * stale data around in the cleancache once our page is gone.
	 */
	if (PageUptodate(page) && PageMappedToDisk(page))
		cleancache_put_page(page);
	else
		cleancache_flush_page(mapping, page);

	radix_tree_delete(&mapping->page_tree, page->index);
	page->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	if (PageSwapBacked(page))
		__dec_zone_page_state(page, NR_SHMEM);
	BUG_ON(page_mapped(page));

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 *
	 * Fix it up by doing a final dirty accounting check after
	 * having removed the page entirely.
	 */
	if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
		dec_zone_page_state(page, NR_FILE_DIRTY);
		dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
	}
}

/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the
 * page cache and locked.  It will never put the page onto the free list
 * because the caller has a reference on the page.
 */
void delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;
	void (*freepage)(struct page *);

	BUG_ON(!PageLocked(page));

	freepage = mapping->a_ops->freepage;
	spin_lock_irq(&mapping->tree_lock);
	__delete_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);

	if (freepage)
		freepage(page);
	page_cache_release(page);
}
EXPORT_SYMBOL(delete_from_page_cache);

static int sleep_on_page(void *word)
{
	io_schedule();
	return 0;
}

static int sleep_on_page_killable(void *word)
{
	sleep_on_page(word);
	return fatal_signal_pending(current) ? -EINTR : 0;
}

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	ret = do_writepages(mapping, &wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
	pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
	struct pagevec pvec;
	int nr_pages;
	int ret = 0;

	if (end_byte < start_byte)
		return 0;

	pagevec_init(&pvec, 0);
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (TestClearPageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	/* Check for outstanding write errors */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * filemap_fdatawait - wait for all under-writeback pages to complete
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return 0;

	return filemap_fdatawait_range(mapping, 0, i_size - 1);
}
EXPORT_SYMBOL(filemap_fdatawait);

int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if (mapping->nrpages) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned an error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But -EIO is a special case: it may indicate that the
		 * worst thing (e.g. a bug) happened, so we avoid waiting
		 * for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that `lend' is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (lend = -1).
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (mapping->nrpages) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = filemap_fdatawait_range(mapping,
						lstart, lend);
			if (!err)
				err = err2;
		}
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
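
/*
 * Example (illustrative sketch, not part of this file): a filesystem's
 * ->fsync() typically brackets its own commit with
 * filemap_write_and_wait_range(), pushing out and then waiting upon the
 * dirty pagecache in the requested range.  example_commit_metadata() is
 * a hypothetical fs-specific helper.
 *
 *	static int example_fsync(struct file *file, loff_t start, loff_t end,
 *				 int datasync)
 *	{
 *		struct address_space *mapping = file->f_mapping;
 *		int err;
 *
 *		err = filemap_write_and_wait_range(mapping, start, end);
 *		if (err)
 *			return err;
 *		return example_commit_metadata(mapping->host, datasync);
 *	}
 */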

/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old:	page to be replaced
 * @new:	page to replace with
 * @gfp_mask:	allocation mode
 *
 * This function replaces a page in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page.  Both the old and new pages must be
 * locked.  This function does not add the new page to the LRU; the
 * caller must do that.
 *
 * The remove + add is atomic.  The only way this function can fail is
 * a memory allocation failure.
 */
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
	int error;
	struct mem_cgroup *memcg = NULL;

	VM_BUG_ON(!PageLocked(old));
	VM_BUG_ON(!PageLocked(new));
	VM_BUG_ON(new->mapping);

	/*
	 * This is not page migration, but prepare_migration and
	 * end_migration does enough work for charge replacement.
	 *
	 * In the longer term we probably want a specialized function
	 * for moving the charge from old to new in a more efficient
	 * manner.
	 */
	error = mem_cgroup_prepare_migration(old, new, &memcg, gfp_mask);
	if (error)
		return error;

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (!error) {
		struct address_space *mapping = old->mapping;
		void (*freepage)(struct page *);

		pgoff_t offset = old->index;
		freepage = mapping->a_ops->freepage;

		page_cache_get(new);
		new->mapping = mapping;
		new->index = offset;

		spin_lock_irq(&mapping->tree_lock);
		__delete_from_page_cache(old);
		error = radix_tree_insert(&mapping->page_tree, offset, new);
		BUG_ON(error);
		mapping->nrpages++;
		__inc_zone_page_state(new, NR_FILE_PAGES);
		if (PageSwapBacked(new))
			__inc_zone_page_state(new, NR_SHMEM);
		spin_unlock_irq(&mapping->tree_lock);
		radix_tree_preload_end();
		if (freepage)
			freepage(old);
		page_cache_release(old);
		mem_cgroup_end_migration(memcg, old, new, true);
	} else {
		mem_cgroup_end_migration(memcg, old, new, false);
	}

	return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);
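
/*
 * Example (illustrative sketch, not part of this file): a caller that
 * wants to substitute its own page for one already in the pagecache -
 * as a network filesystem might when splicing received data into the
 * cache - locks both pages, lets replace_page_cache_page() do the
 * atomic swap, and then adds the new page to the LRU itself.  The
 * locking order shown is an assumption of this sketch.
 *
 *	lock_page(oldpage);
 *	lock_page(newpage);
 *	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
 *	if (!err)
 *		lru_cache_add_file(newpage);
 *	unlock_page(newpage);
 *	unlock_page(oldpage);
 */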

/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page:	page to add
 * @mapping:	the page's address_space
 * @offset:	page index
 * @gfp_mask:	page allocation mode
 *
 * This function is used to add a page to the pagecache.  The page must be
 * locked.  This function does not add the page to the LRU.  The caller must
 * do that.
 */
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
		pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageSwapBacked(page));

	error = mem_cgroup_cache_charge(page, current->mm,
					gfp_mask & GFP_RECLAIM_MASK);
	if (error)
		goto out;

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error == 0) {
		page_cache_get(page);
		page->mapping = mapping;
		page->index = offset;

		spin_lock_irq(&mapping->tree_lock);
		error = radix_tree_insert(&mapping->page_tree, offset, page);
		if (likely(!error)) {
			mapping->nrpages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
			spin_unlock_irq(&mapping->tree_lock);
		} else {
			page->mapping = NULL;
			/* Leave page->index set: truncation relies upon it */
			spin_unlock_irq(&mapping->tree_lock);
			mem_cgroup_uncharge_cache_page(page);
			page_cache_release(page);
		}
		radix_tree_preload_end();
	} else
		mem_cgroup_uncharge_cache_page(page);
out:
	return error;
}
EXPORT_SYMBOL(add_to_page_cache_locked);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t offset, gfp_t gfp_mask)
{
	int ret;

	ret = add_to_page_cache(page, mapping, offset, gfp_mask);
	if (ret == 0)
		lru_cache_add_file(page);
	return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
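
/*
 * Example (illustrative sketch, not part of this file): the usual way a
 * new page enters the pagecache on the read path is to allocate it,
 * insert it locked with add_to_page_cache_lru(), and then ask the
 * filesystem to fill it.  "file" and "index" are assumed to be supplied
 * by the caller.
 *
 *	page = __page_cache_alloc(mapping_gfp_mask(mapping));
 *	if (!page)
 *		return -ENOMEM;
 *	err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
 *	if (err) {
 *		page_cache_release(page);
 *		return err;	(-EEXIST means someone else won the race)
 *	}
 *	err = mapping->a_ops->readpage(file, page);
 */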

#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	int n;
	struct page *page;

	if (cpuset_do_page_mem_spread()) {
		get_mems_allowed();
		n = cpuset_mem_spread_node();
		page = alloc_pages_exact_node(n, gfp, 0);
		put_mems_allowed();
		return page;
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages.  Rather than giving each page its
 * own waitqueue, we use a hash table of waitqueues: all waiters for
 * pages that hash to the same bucket share one queue, all of them are
 * woken when any of those pages becomes available, and each woken
 * context re-checks that the page it cares about actually became
 * available.  This saves space at the cost of "thundering herd"
 * phenomena during rare hash collisions.
 */
static wait_queue_head_t *page_waitqueue(struct page *page)
{
	const struct zone *zone = page_zone(page);

	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}

static inline void wake_up_page(struct page *page, int bit)
{
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

void wait_on_page_bit(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (test_bit(bit_nr, &page->flags))
		__wait_on_bit(page_waitqueue(page), &wait, sleep_on_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);

int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (!test_bit(bit_nr, &page->flags))
		return 0;

	return __wait_on_bit(page_waitqueue(page), &wait,
			     sleep_on_page_killable, TASK_KILLABLE);
}

/**
 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 * @page: Page defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */
void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
{
	wait_queue_head_t *q = page_waitqueue(page);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, waiter);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);

/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The mb is necessary to enforce ordering between the clear_bit and the read
 * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
 */
void unlock_page(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));
	clear_bit_unlock(PG_locked, &page->flags);
	smp_mb__after_clear_bit();
	wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);

/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
	if (TestClearPageReclaim(page))
		rotate_reclaimable_page(page);

	if (!test_clear_page_writeback(page))
		BUG();

	smp_mb__after_clear_bit();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);
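
/*
 * Example (illustrative sketch, not part of this file):
 * end_page_writeback() is the writer-side counterpart of
 * set_page_writeback().  A simple ->writepage() implementation marks
 * the page, unlocks it, submits the I/O, and signals completion from
 * its I/O callback.  example_submit_io() is hypothetical.
 *
 *	set_page_writeback(page);
 *	unlock_page(page);
 *	example_submit_io(page);	on I/O completion, the callback
 *					calls end_page_writeback(page)
 */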

/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @page: the page to lock
 */
void __lock_page(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	__wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);

int __lock_page_killable(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	return __wait_on_bit_lock(page_waitqueue(page), &wait,
					sleep_on_page_killable, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);

int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
			 unsigned int flags)
{
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/*
		 * CAUTION! In this case, mmap_sem is not released
		 * even though we return 0.
		 */
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return 0;

		up_read(&mm->mmap_sem);
		if (flags & FAULT_FLAG_KILLABLE)
			wait_on_page_locked_killable(page);
		else
			wait_on_page_locked(page);
		return 0;
	} else {
		if (flags & FAULT_FLAG_KILLABLE) {
			int ret;

			ret = __lock_page_killable(page);
			if (ret) {
				up_read(&mm->mmap_sem);
				return 0;
			}
		} else
			__lock_page(page);
		return 1;
	}
}

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Is there a pagecache struct page at the given (mapping, offset) tuple?
 * If yes, increment its refcount and return it; if no, return NULL.
 */
struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
{
	void **pagep;
	struct page *page;

	rcu_read_lock();
repeat:
	page = NULL;
	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
	if (pagep) {
		page = radix_tree_deref_slot(pagep);
		if (unlikely(!page))
			goto out;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto repeat;
			/*
			 * Otherwise, shmem/tmpfs must be storing a swap entry
			 * here as an exceptional entry: so return it without
			 * attempting to raise page count.
			 */
			goto out;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/*
		 * Has the page moved?
		 * This is part of the lockless pagecache protocol. See
		 * include/linux/pagemap.h for details.
		 */
		if (unlikely(page != *pagep)) {
			page_cache_release(page);
			goto repeat;
		}
	}
out:
	rcu_read_unlock();

	return page;
}
EXPORT_SYMBOL(find_get_page);
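
/*
 * Example (illustrative sketch, not part of this file): find_get_page()
 * returns the page with an elevated refcount (or NULL), so every
 * successful lookup must be balanced by page_cache_release():
 *
 *	page = find_get_page(mapping, index);
 *	if (page) {
 *		... inspect the page ...
 *		page_cache_release(page);
 *	}
 */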

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Locates the desired pagecache page, locks it, increments its reference
 * count and returns its address.
 *
 * Returns %NULL if the page was not present. find_lock_page() may sleep.
 */
struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
{
	struct page *page;

repeat:
	page = find_get_page(mapping, offset);
	if (page && !radix_tree_exception(page)) {
		lock_page(page);
		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		VM_BUG_ON(page->index != offset);
	}
	return page;
}
EXPORT_SYMBOL(find_lock_page);
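
/*
 * Example (illustrative sketch, not part of this file): because
 * find_lock_page() returns the page locked, the caller may test state
 * that is only stable under the page lock before dropping it:
 *
 *	page = find_lock_page(mapping, index);
 *	if (page) {
 *		if (PageUptodate(page))
 *			... use the page contents ...
 *		unlock_page(page);
 *		page_cache_release(page);
 *	}
 */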

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Locates a page in the pagecache.  If the page is not present, a new page
 * is allocated using @gfp_mask and is added to the pagecache and to the VM's
 * LRU list.  The returned page is locked and has its reference count
 * incremented.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an atomic
 * allocation!
 *
 * find_or_create_page() returns the desired page's address, or %NULL on
 * memory exhaustion.
 */
struct page *find_or_create_page(struct address_space *mapping,
		pgoff_t index, gfp_t gfp_mask)
{
	struct page *page;
	int err;
repeat:
	page = find_lock_page(mapping, index);
	if (!page) {
		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return NULL;
		/*
		 * We want a regular kernel memory (not highmem or DMA etc)
		 * allocation for the radix tree nodes, but we need to honour
		 * the context-specific requirements the caller has asked for.
		 * GFP_RECLAIM_MASK collects those requirements.
		 */
		err = add_to_page_cache_lru(page, mapping, index,
			(gfp_mask & GFP_RECLAIM_MASK));
		if (unlikely(err)) {
			page_cache_release(page);
			page = NULL;
			if (err == -EEXIST)
				goto repeat;
		}
	}
	return page;
}
EXPORT_SYMBOL(find_or_create_page);
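
/*
 * Example (illustrative sketch, not part of this file):
 * find_or_create_page() is the usual way to pin down a pagecache slot
 * before modifying it, e.g. when a filesystem zeroes a partial block;
 * the GFP_NOFS choice here is an assumption of the sketch.
 *
 *	page = find_or_create_page(mapping, index, GFP_NOFS);
 *	if (!page)
 *		return -ENOMEM;
 *	... modify the page, set_page_dirty(page) ...
 *	unlock_page(page);
 *	page_cache_release(page);
 */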

/**
 * find_get_pages - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indices.  There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			    unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;
	unsigned int nr_found, nr_skip;

	rcu_read_lock();
restart:
	nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
				(void ***)pages, NULL, start, nr_pages);
	ret = 0;
	nr_skip = 0;
	for (i = 0; i < nr_found; i++) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot((void **)pages[i]);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				WARN_ON(start | i);
				goto restart;
			}
			/*
			 * Otherwise, shmem/tmpfs must be storing a swap entry
			 * here as an exceptional entry: so skip over it -
			 * we only reach this from invalidate_mapping_pages().
			 */
			nr_skip++;
			continue;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *((void **)pages[i]))) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		ret++;
	}

	/*
	 * If all entries were removed before we could secure them,
	 * try again, because callers stop trying once 0 is returned.
	 */
	if (unlikely(!ret && nr_found > nr_skip))
		goto restart;
	rcu_read_unlock();
	return ret;
}
 | 886 |  | 
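/*
 * A minimal usage sketch (illustrative only, not part of this file): most
 * callers drive find_get_pages() in a loop over a range, releasing the
 * reference taken on each returned page:
 *
 *	struct page *pages[16];
 *	pgoff_t next = start;
 *	unsigned i, nr;
 *
 *	while ((nr = find_get_pages(mapping, next, 16, pages))) {
 *		for (i = 0; i < nr; i++) {
 *			next = pages[i]->index + 1;
 *			// ... operate on pages[i] ...
 *			page_cache_release(pages[i]);
 *		}
 *	}
 *
 * Each returned page carries an elevated refcount, so every one must be
 * dropped with page_cache_release() when the caller is done with it.
 */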
| Jens Axboe | ebf4350 | 2006-04-27 08:46:01 +0200 | [diff] [blame] | 887 | /** | 
 | 888 |  * find_get_pages_contig - gang contiguous pagecache lookup | 
 | 889 |  * @mapping:	The address_space to search | 
 | 890 |  * @index:	The starting page index | 
 | 891 |  * @nr_pages:	The maximum number of pages | 
 | 892 |  * @pages:	Where the resulting pages are placed | 
 | 893 |  * | 
 | 894 |  * find_get_pages_contig() works exactly like find_get_pages(), except | 
 | 895 |  * that the returned number of pages are guaranteed to be contiguous. | 
 | 896 |  * | 
 | 897 |  * find_get_pages_contig() returns the number of pages which were found. | 
 | 898 |  */ | 
 | 899 | unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index, | 
 | 900 | 			       unsigned int nr_pages, struct page **pages) | 
 | 901 | { | 
 | 902 | 	unsigned int i; | 
 | 903 | 	unsigned int ret; | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 904 | 	unsigned int nr_found; | 
| Jens Axboe | ebf4350 | 2006-04-27 08:46:01 +0200 | [diff] [blame] | 905 |  | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 906 | 	rcu_read_lock(); | 
 | 907 | restart: | 
 | 908 | 	nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree, | 
| Hugh Dickins | 6328650 | 2011-08-03 16:21:18 -0700 | [diff] [blame] | 909 | 				(void ***)pages, NULL, index, nr_pages); | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 910 | 	ret = 0; | 
 | 911 | 	for (i = 0; i < nr_found; i++) { | 
 | 912 | 		struct page *page; | 
 | 913 | repeat: | 
 | 914 | 		page = radix_tree_deref_slot((void **)pages[i]); | 
 | 915 | 		if (unlikely(!page)) | 
 | 916 | 			continue; | 
| Hugh Dickins | 9d8aa4e | 2011-03-22 16:33:06 -0700 | [diff] [blame] | 917 |  | 
| Hugh Dickins | a2c16d6 | 2011-08-03 16:21:19 -0700 | [diff] [blame] | 918 | 		if (radix_tree_exception(page)) { | 
| Hugh Dickins | 8079b1c | 2011-08-03 16:21:28 -0700 | [diff] [blame] | 919 | 			if (radix_tree_deref_retry(page)) { | 
 | 920 | 				/* | 
 | 921 | 				 * Transient condition which can only trigger | 
 | 922 | 				 * when entry at index 0 moves out of or back | 
 | 923 | 				 * to root: none yet gotten, safe to restart. | 
 | 924 | 				 */ | 
 | 925 | 				goto restart; | 
 | 926 | 			} | 
| Hugh Dickins | a2c16d6 | 2011-08-03 16:21:19 -0700 | [diff] [blame] | 927 | 			/* | 
| Hugh Dickins | 8079b1c | 2011-08-03 16:21:28 -0700 | [diff] [blame] | 928 | 			 * Otherwise, shmem/tmpfs must be storing a swap entry | 
 | 929 | 			 * here as an exceptional entry: so stop looking for | 
 | 930 | 			 * contiguous pages. | 
| Hugh Dickins | a2c16d6 | 2011-08-03 16:21:19 -0700 | [diff] [blame] | 931 | 			 */ | 
| Hugh Dickins | 8079b1c | 2011-08-03 16:21:28 -0700 | [diff] [blame] | 932 | 			break; | 
| Hugh Dickins | a2c16d6 | 2011-08-03 16:21:19 -0700 | [diff] [blame] | 933 | 		} | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 934 |  | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 935 | 		if (!page_cache_get_speculative(page)) | 
 | 936 | 			goto repeat; | 
 | 937 |  | 
 | 938 | 		/* Has the page moved? */ | 
 | 939 | 		if (unlikely(page != *((void **)pages[i]))) { | 
 | 940 | 			page_cache_release(page); | 
 | 941 | 			goto repeat; | 
 | 942 | 		} | 
 | 943 |  | 
| Nick Piggin | 9cbb4cb | 2011-01-13 15:45:51 -0800 | [diff] [blame] | 944 | 		/* | 
 | 945 | 		 * We must check mapping and index after taking the ref; | 
 | 946 | 		 * otherwise we can get both false positives and false | 
 | 947 | 		 * negatives, which is just confusing to the caller. | 
 | 948 | 		 */ | 
 | 949 | 		if (page->mapping == NULL || page->index != index) { | 
 | 950 | 			page_cache_release(page); | 
 | 951 | 			break; | 
 | 952 | 		} | 
 | 953 |  | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 954 | 		pages[ret] = page; | 
 | 955 | 		ret++; | 
| Jens Axboe | ebf4350 | 2006-04-27 08:46:01 +0200 | [diff] [blame] | 956 | 		index++; | 
 | 957 | 	} | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 958 | 	rcu_read_unlock(); | 
 | 959 | 	return ret; | 
| Jens Axboe | ebf4350 | 2006-04-27 08:46:01 +0200 | [diff] [blame] | 960 | } | 
| David Howells | ef71c15 | 2007-05-09 02:33:44 -0700 | [diff] [blame] | 961 | EXPORT_SYMBOL(find_get_pages_contig); | 
| Jens Axboe | ebf4350 | 2006-04-27 08:46:01 +0200 | [diff] [blame] | 962 |  | 
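/*
 * Sketch of a contiguous lookup (illustrative only): because the result
 * is guaranteed to be a contiguous run starting at @index, a caller can,
 * for example, size a single I/O over the run before dropping the
 * references:
 *
 *	struct page *pages[16];
 *	unsigned i, nr;
 *
 *	nr = find_get_pages_contig(mapping, index, 16, pages);
 *	// pages[0..nr-1] now sit at index, index + 1, ..., index + nr - 1
 *	for (i = 0; i < nr; i++)
 *		page_cache_release(pages[i]);
 */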
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 963 | /** | 
 | 964 |  * find_get_pages_tag - find and return pages that match @tag | 
 | 965 |  * @mapping:	the address_space to search | 
 | 966 |  * @index:	the starting page index | 
 | 967 |  * @tag:	the tag index | 
 | 968 |  * @nr_pages:	the maximum number of pages | 
 | 969 |  * @pages:	where the resulting pages are placed | 
 | 970 |  * | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 971 |  * Like find_get_pages, except we only return pages which are tagged with | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 972 |  * @tag.  We update @index to point to the next page for the traversal. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 973 |  */ | 
 | 974 | unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index, | 
 | 975 | 			int tag, unsigned int nr_pages, struct page **pages) | 
 | 976 | { | 
 | 977 | 	unsigned int i; | 
 | 978 | 	unsigned int ret; | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 979 | 	unsigned int nr_found; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 980 |  | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 981 | 	rcu_read_lock(); | 
 | 982 | restart: | 
 | 983 | 	nr_found = radix_tree_gang_lookup_tag_slot(&mapping->page_tree, | 
 | 984 | 				(void ***)pages, *index, nr_pages, tag); | 
 | 985 | 	ret = 0; | 
 | 986 | 	for (i = 0; i < nr_found; i++) { | 
 | 987 | 		struct page *page; | 
 | 988 | repeat: | 
 | 989 | 		page = radix_tree_deref_slot((void **)pages[i]); | 
 | 990 | 		if (unlikely(!page)) | 
 | 991 | 			continue; | 
| Hugh Dickins | 9d8aa4e | 2011-03-22 16:33:06 -0700 | [diff] [blame] | 992 |  | 
| Hugh Dickins | a2c16d6 | 2011-08-03 16:21:19 -0700 | [diff] [blame] | 993 | 		if (radix_tree_exception(page)) { | 
| Hugh Dickins | 8079b1c | 2011-08-03 16:21:28 -0700 | [diff] [blame] | 994 | 			if (radix_tree_deref_retry(page)) { | 
 | 995 | 				/* | 
 | 996 | 				 * Transient condition which can only trigger | 
 | 997 | 				 * when entry at index 0 moves out of or back | 
 | 998 | 				 * to root: none yet gotten, safe to restart. | 
 | 999 | 				 */ | 
 | 1000 | 				goto restart; | 
 | 1001 | 			} | 
| Hugh Dickins | a2c16d6 | 2011-08-03 16:21:19 -0700 | [diff] [blame] | 1002 | 			/* | 
| Hugh Dickins | 8079b1c | 2011-08-03 16:21:28 -0700 | [diff] [blame] | 1003 | 			 * This function is never used on a shmem/tmpfs | 
 | 1004 | 			 * mapping, so a swap entry won't be found here. | 
| Hugh Dickins | a2c16d6 | 2011-08-03 16:21:19 -0700 | [diff] [blame] | 1005 | 			 */ | 
| Hugh Dickins | 8079b1c | 2011-08-03 16:21:28 -0700 | [diff] [blame] | 1006 | 			BUG(); | 
| Hugh Dickins | a2c16d6 | 2011-08-03 16:21:19 -0700 | [diff] [blame] | 1007 | 		} | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1008 |  | 
 | 1009 | 		if (!page_cache_get_speculative(page)) | 
 | 1010 | 			goto repeat; | 
 | 1011 |  | 
 | 1012 | 		/* Has the page moved? */ | 
 | 1013 | 		if (unlikely(page != *((void **)pages[i]))) { | 
 | 1014 | 			page_cache_release(page); | 
 | 1015 | 			goto repeat; | 
 | 1016 | 		} | 
 | 1017 |  | 
 | 1018 | 		pages[ret] = page; | 
 | 1019 | 		ret++; | 
 | 1020 | 	} | 
| Hugh Dickins | 5b280c0 | 2011-03-22 16:33:07 -0700 | [diff] [blame] | 1021 |  | 
 | 1022 | 	/* | 
 | 1023 | 	 * If all entries were removed before we could secure them, | 
 | 1024 | 	 * try again, because callers stop trying once 0 is returned. | 
 | 1025 | 	 */ | 
 | 1026 | 	if (unlikely(!ret && nr_found)) | 
 | 1027 | 		goto restart; | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1028 | 	rcu_read_unlock(); | 
 | 1029 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1030 | 	if (ret) | 
 | 1031 | 		*index = pages[ret - 1]->index + 1; | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1032 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1033 | 	return ret; | 
 | 1034 | } | 
| David Howells | ef71c15 | 2007-05-09 02:33:44 -0700 | [diff] [blame] | 1035 | EXPORT_SYMBOL(find_get_pages_tag); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1036 |  | 
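/*
 * Sketch (illustrative only): writeback is the classic user of tagged
 * lookup; a dirty-page walk analogous to write_cache_pages() looks like:
 *
 *	pgoff_t index = 0;
 *	struct page *pages[16];
 *	unsigned i, nr;
 *
 *	while ((nr = find_get_pages_tag(mapping, &index,
 *					PAGECACHE_TAG_DIRTY, 16, pages))) {
 *		for (i = 0; i < nr; i++) {
 *			// ... lock and write out pages[i] ...
 *			page_cache_release(pages[i]);
 *		}
 *	}
 *
 * Since the function advances @index itself, the loop naturally walks
 * forward through the mapping.
 */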
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 1037 | /** | 
 | 1038 |  * grab_cache_page_nowait - returns locked page at given index in given cache | 
 | 1039 |  * @mapping: target address_space | 
 | 1040 |  * @index: the page index | 
 | 1041 |  * | 
| Robert P. J. Day | 72fd4a3 | 2007-02-10 01:45:59 -0800 | [diff] [blame] | 1042 |  * Same as grab_cache_page(), but do not wait if the page is unavailable. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1043 |  * This is intended for speculative data generators, where the data can | 
 | 1044 |  * be regenerated if the page couldn't be grabbed.  This routine should | 
 | 1045 |  * be safe to call while holding the lock for another page. | 
 | 1046 |  * | 
 | 1047 |  * Clear __GFP_FS when allocating the page to avoid recursion into the fs | 
 | 1048 |  * and deadlock against the caller's locked page. | 
 | 1049 |  */ | 
 | 1050 | struct page * | 
| Fengguang Wu | 57f6b96 | 2007-10-16 01:24:37 -0700 | [diff] [blame] | 1051 | grab_cache_page_nowait(struct address_space *mapping, pgoff_t index) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1052 | { | 
 | 1053 | 	struct page *page = find_get_page(mapping, index); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1054 |  | 
 | 1055 | 	if (page) { | 
| Nick Piggin | 529ae9a | 2008-08-02 12:01:03 +0200 | [diff] [blame] | 1056 | 		if (trylock_page(page)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1057 | 			return page; | 
 | 1058 | 		page_cache_release(page); | 
 | 1059 | 		return NULL; | 
 | 1060 | 	} | 
| Nick Piggin | 2ae8814 | 2006-10-28 10:38:23 -0700 | [diff] [blame] | 1061 | 	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS); | 
| Nick Piggin | 67d58ac | 2009-01-06 14:40:28 -0800 | [diff] [blame] | 1062 | 	if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1063 | 		page_cache_release(page); | 
 | 1064 | 		page = NULL; | 
 | 1065 | 	} | 
 | 1066 | 	return page; | 
 | 1067 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1068 | EXPORT_SYMBOL(grab_cache_page_nowait); | 
 | 1069 |  | 
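/*
 * Sketch of the intended calling pattern (illustrative only): a
 * speculative data generator tries the grab without blocking and simply
 * regenerates the data later if it fails:
 *
 *	page = grab_cache_page_nowait(mapping, index);
 *	if (!page)
 *		return;			// busy or no memory: retry later
 *	// ... generate data into the page ...
 *	unlock_page(page);
 *	page_cache_release(page);
 *
 * On success the page comes back locked with a reference held, so both
 * the lock and the reference must be dropped.
 */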
| Wu Fengguang | 76d42bd | 2006-06-25 05:48:43 -0700 | [diff] [blame] | 1070 | /* | 
 | 1071 |  * CD/DVDs are error-prone. When a medium error occurs, the driver may fail | 
 | 1072 |  * a _large_ part of the i/o request. Imagine the worst scenario: | 
 | 1073 |  * | 
 | 1074 |  *      ---R__________________________________________B__________ | 
 | 1075 |  *         ^ reading here                             ^ bad block (assume 4k) | 
 | 1076 |  * | 
 | 1077 |  * read(R) => miss => readahead(R...B) => media error => frustrating retries | 
 | 1078 |  * => failing the whole request => read(R) => read(R+1) => | 
 | 1079 |  * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) => | 
 | 1080 |  * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) => | 
 | 1081 |  * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ...... | 
 | 1082 |  * | 
 | 1083 |  * It is going insane. Fix it by quickly scaling down the readahead size. | 
 | 1084 |  */ | 
 | 1085 | static void shrink_readahead_size_eio(struct file *filp, | 
 | 1086 | 					struct file_ra_state *ra) | 
 | 1087 | { | 
| Wu Fengguang | 76d42bd | 2006-06-25 05:48:43 -0700 | [diff] [blame] | 1088 | 	ra->ra_pages /= 4; | 
| Wu Fengguang | 76d42bd | 2006-06-25 05:48:43 -0700 | [diff] [blame] | 1089 | } | 
 | 1090 |  | 
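/*
 * Worked example of the scaling above, assuming the common default of a
 * 128kB readahead window on 4kB pages: ra_pages starts at 32, so repeated
 * media errors shrink it 32 -> 8 -> 2 -> 0, at which point readahead is
 * effectively disabled for this file.
 */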
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 1091 | /** | 
| Christoph Hellwig | 36e7891 | 2008-02-08 04:21:24 -0800 | [diff] [blame] | 1092 |  * do_generic_file_read - generic file read routine | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 1093 |  * @filp:	the file to read | 
 | 1094 |  * @ppos:	current file position | 
 | 1095 |  * @desc:	read_descriptor | 
 | 1096 |  * @actor:	read method | 
 | 1097 |  * | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1098 |  * This is a generic file read routine, and uses the | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 1099 |  * mapping->a_ops->readpage() function for the actual low-level stuff. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1100 |  * | 
 | 1101 |  * This is really ugly. But the gotos actually try to clarify some | 
 | 1102 |  * of the logic when it comes to error handling etc. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1103 |  */ | 
| Christoph Hellwig | 36e7891 | 2008-02-08 04:21:24 -0800 | [diff] [blame] | 1104 | static void do_generic_file_read(struct file *filp, loff_t *ppos, | 
 | 1105 | 		read_descriptor_t *desc, read_actor_t actor) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1106 | { | 
| Christoph Hellwig | 36e7891 | 2008-02-08 04:21:24 -0800 | [diff] [blame] | 1107 | 	struct address_space *mapping = filp->f_mapping; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1108 | 	struct inode *inode = mapping->host; | 
| Christoph Hellwig | 36e7891 | 2008-02-08 04:21:24 -0800 | [diff] [blame] | 1109 | 	struct file_ra_state *ra = &filp->f_ra; | 
| Fengguang Wu | 57f6b96 | 2007-10-16 01:24:37 -0700 | [diff] [blame] | 1110 | 	pgoff_t index; | 
 | 1111 | 	pgoff_t last_index; | 
 | 1112 | 	pgoff_t prev_index; | 
 | 1113 | 	unsigned long offset;      /* offset into pagecache page */ | 
| Jan Kara | ec0f163 | 2007-05-06 14:49:25 -0700 | [diff] [blame] | 1114 | 	unsigned int prev_offset; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1115 | 	int error; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1116 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1117 | 	index = *ppos >> PAGE_CACHE_SHIFT; | 
| Fengguang Wu | 7ff8107 | 2007-10-16 01:24:35 -0700 | [diff] [blame] | 1118 | 	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT; | 
 | 1119 | 	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1120 | 	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT; | 
 | 1121 | 	offset = *ppos & ~PAGE_CACHE_MASK; | 
 | 1122 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1123 | 	for (;;) { | 
 | 1124 | 		struct page *page; | 
| Fengguang Wu | 57f6b96 | 2007-10-16 01:24:37 -0700 | [diff] [blame] | 1125 | 		pgoff_t end_index; | 
| NeilBrown | a32ea1e | 2007-07-17 04:03:04 -0700 | [diff] [blame] | 1126 | 		loff_t isize; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1127 | 		unsigned long nr, ret; | 
 | 1128 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1129 | 		cond_resched(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1130 | find_page: | 
 | 1131 | 		page = find_get_page(mapping, index); | 
| Fengguang Wu | 3ea89ee | 2007-07-19 01:48:02 -0700 | [diff] [blame] | 1132 | 		if (!page) { | 
| Rusty Russell | cf914a7 | 2007-07-19 01:48:08 -0700 | [diff] [blame] | 1133 | 			page_cache_sync_readahead(mapping, | 
| Fengguang Wu | 7ff8107 | 2007-10-16 01:24:35 -0700 | [diff] [blame] | 1134 | 					ra, filp, | 
| Fengguang Wu | 3ea89ee | 2007-07-19 01:48:02 -0700 | [diff] [blame] | 1135 | 					index, last_index - index); | 
 | 1136 | 			page = find_get_page(mapping, index); | 
 | 1137 | 			if (unlikely(page == NULL)) | 
 | 1138 | 				goto no_cached_page; | 
 | 1139 | 		} | 
 | 1140 | 		if (PageReadahead(page)) { | 
| Rusty Russell | cf914a7 | 2007-07-19 01:48:08 -0700 | [diff] [blame] | 1141 | 			page_cache_async_readahead(mapping, | 
| Fengguang Wu | 7ff8107 | 2007-10-16 01:24:35 -0700 | [diff] [blame] | 1142 | 					ra, filp, page, | 
| Fengguang Wu | 3ea89ee | 2007-07-19 01:48:02 -0700 | [diff] [blame] | 1143 | 					index, last_index - index); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1144 | 		} | 
| Hisashi Hifumi | 8ab22b9 | 2008-07-28 15:46:36 -0700 | [diff] [blame] | 1145 | 		if (!PageUptodate(page)) { | 
 | 1146 | 			if (inode->i_blkbits == PAGE_CACHE_SHIFT || | 
 | 1147 | 					!mapping->a_ops->is_partially_uptodate) | 
 | 1148 | 				goto page_not_up_to_date; | 
| Nick Piggin | 529ae9a | 2008-08-02 12:01:03 +0200 | [diff] [blame] | 1149 | 			if (!trylock_page(page)) | 
| Hisashi Hifumi | 8ab22b9 | 2008-07-28 15:46:36 -0700 | [diff] [blame] | 1150 | 				goto page_not_up_to_date; | 
| Dave Hansen | 8d056cb | 2010-11-11 14:05:15 -0800 | [diff] [blame] | 1151 | 			/* Did it get truncated before we got the lock? */ | 
 | 1152 | 			if (!page->mapping) | 
 | 1153 | 				goto page_not_up_to_date_locked; | 
| Hisashi Hifumi | 8ab22b9 | 2008-07-28 15:46:36 -0700 | [diff] [blame] | 1154 | 			if (!mapping->a_ops->is_partially_uptodate(page, | 
 | 1155 | 								desc, offset)) | 
 | 1156 | 				goto page_not_up_to_date_locked; | 
 | 1157 | 			unlock_page(page); | 
 | 1158 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1159 | page_ok: | 
| NeilBrown | a32ea1e | 2007-07-17 04:03:04 -0700 | [diff] [blame] | 1160 | 		/* | 
 | 1161 | 		 * i_size must be checked after we know the page is Uptodate. | 
 | 1162 | 		 * | 
 | 1163 |  * Checking i_size after the Uptodate check allows us to calculate | 
 | 1164 | 		 * the correct value for "nr", which means the zero-filled | 
 | 1165 | 		 * part of the page is not copied back to userspace (unless | 
 | 1166 | 		 * another truncate extends the file - this is desired though). | 
 | 1167 | 		 */ | 
 | 1168 |  | 
 | 1169 | 		isize = i_size_read(inode); | 
 | 1170 | 		end_index = (isize - 1) >> PAGE_CACHE_SHIFT; | 
 | 1171 | 		if (unlikely(!isize || index > end_index)) { | 
 | 1172 | 			page_cache_release(page); | 
 | 1173 | 			goto out; | 
 | 1174 | 		} | 
 | 1175 |  | 
 | 1176 | 		/* nr is the maximum number of bytes to copy from this page */ | 
 | 1177 | 		nr = PAGE_CACHE_SIZE; | 
 | 1178 | 		if (index == end_index) { | 
 | 1179 | 			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; | 
 | 1180 | 			if (nr <= offset) { | 
 | 1181 | 				page_cache_release(page); | 
 | 1182 | 				goto out; | 
 | 1183 | 			} | 
 | 1184 | 		} | 
 | 1185 | 		nr = nr - offset; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1186 |  | 
 | 1187 | 		/* If users can be writing to this page using arbitrary | 
 | 1188 | 		 * virtual addresses, take care about potential aliasing | 
 | 1189 | 		 * before reading the page on the kernel side. | 
 | 1190 | 		 */ | 
 | 1191 | 		if (mapping_writably_mapped(mapping)) | 
 | 1192 | 			flush_dcache_page(page); | 
 | 1193 |  | 
 | 1194 | 		/* | 
| Jan Kara | ec0f163 | 2007-05-06 14:49:25 -0700 | [diff] [blame] | 1195 | 		 * When a sequential read accesses a page several times, | 
 | 1196 | 		 * only mark it as accessed the first time. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1197 | 		 */ | 
| Jan Kara | ec0f163 | 2007-05-06 14:49:25 -0700 | [diff] [blame] | 1198 | 		if (prev_index != index || offset != prev_offset) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1199 | 			mark_page_accessed(page); | 
 | 1200 | 		prev_index = index; | 
 | 1201 |  | 
 | 1202 | 		/* | 
 | 1203 | 		 * Ok, we have the page, and it's up-to-date, so | 
 | 1204 | 		 * now we can copy it to user space... | 
 | 1205 | 		 * | 
 | 1206 |  * The actor routine returns how many bytes were actually used. | 
 | 1207 | 		 * NOTE! This may not be the same as how much of a user buffer | 
 | 1208 | 		 * we filled up (we may be padding etc), so we can only update | 
 | 1209 | 		 * "pos" here (the actor routine has to update the user buffer | 
 | 1210 | 		 * pointers and the remaining count). | 
 | 1211 | 		 */ | 
 | 1212 | 		ret = actor(desc, page, offset, nr); | 
 | 1213 | 		offset += ret; | 
 | 1214 | 		index += offset >> PAGE_CACHE_SHIFT; | 
 | 1215 | 		offset &= ~PAGE_CACHE_MASK; | 
| Jan Kara | 6ce745e | 2007-05-06 14:49:26 -0700 | [diff] [blame] | 1216 | 		prev_offset = offset; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1217 |  | 
 | 1218 | 		page_cache_release(page); | 
 | 1219 | 		if (ret == nr && desc->count) | 
 | 1220 | 			continue; | 
 | 1221 | 		goto out; | 
 | 1222 |  | 
 | 1223 | page_not_up_to_date: | 
 | 1224 | 		/* Get exclusive access to the page ... */ | 
| Oleg Nesterov | 8546232 | 2008-06-08 21:20:43 +0400 | [diff] [blame] | 1225 | 		error = lock_page_killable(page); | 
 | 1226 | 		if (unlikely(error)) | 
 | 1227 | 			goto readpage_error; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1228 |  | 
| Hisashi Hifumi | 8ab22b9 | 2008-07-28 15:46:36 -0700 | [diff] [blame] | 1229 | page_not_up_to_date_locked: | 
| Nick Piggin | da6052f | 2006-09-25 23:31:35 -0700 | [diff] [blame] | 1230 | 		/* Did it get truncated before we got the lock? */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1231 | 		if (!page->mapping) { | 
 | 1232 | 			unlock_page(page); | 
 | 1233 | 			page_cache_release(page); | 
 | 1234 | 			continue; | 
 | 1235 | 		} | 
 | 1236 |  | 
 | 1237 | 		/* Did somebody else fill it already? */ | 
 | 1238 | 		if (PageUptodate(page)) { | 
 | 1239 | 			unlock_page(page); | 
 | 1240 | 			goto page_ok; | 
 | 1241 | 		} | 
 | 1242 |  | 
 | 1243 | readpage: | 
| Jeff Moyer | 91803b4 | 2010-05-26 11:49:40 -0400 | [diff] [blame] | 1244 | 		/* | 
 | 1245 | 		 * A previous I/O error may have been due to temporary | 
 | 1246 |  * failures, e.g. multipath errors. | 
 | 1247 | 		 * PG_error will be set again if readpage fails. | 
 | 1248 | 		 */ | 
 | 1249 | 		ClearPageError(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1250 | 		/* Start the actual read. The read will unlock the page. */ | 
 | 1251 | 		error = mapping->a_ops->readpage(filp, page); | 
 | 1252 |  | 
| Zach Brown | 994fc28c | 2005-12-15 14:28:17 -0800 | [diff] [blame] | 1253 | 		if (unlikely(error)) { | 
 | 1254 | 			if (error == AOP_TRUNCATED_PAGE) { | 
 | 1255 | 				page_cache_release(page); | 
 | 1256 | 				goto find_page; | 
 | 1257 | 			} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1258 | 			goto readpage_error; | 
| Zach Brown | 994fc28c | 2005-12-15 14:28:17 -0800 | [diff] [blame] | 1259 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1260 |  | 
 | 1261 | 		if (!PageUptodate(page)) { | 
| Oleg Nesterov | 8546232 | 2008-06-08 21:20:43 +0400 | [diff] [blame] | 1262 | 			error = lock_page_killable(page); | 
 | 1263 | 			if (unlikely(error)) | 
 | 1264 | 				goto readpage_error; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1265 | 			if (!PageUptodate(page)) { | 
 | 1266 | 				if (page->mapping == NULL) { | 
 | 1267 | 					/* | 
| Christoph Hellwig | 2ecdc82 | 2010-01-26 17:27:20 +0100 | [diff] [blame] | 1268 | 					 * invalidate_mapping_pages got it | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1269 | 					 */ | 
 | 1270 | 					unlock_page(page); | 
 | 1271 | 					page_cache_release(page); | 
 | 1272 | 					goto find_page; | 
 | 1273 | 				} | 
 | 1274 | 				unlock_page(page); | 
| Fengguang Wu | 7ff8107 | 2007-10-16 01:24:35 -0700 | [diff] [blame] | 1275 | 				shrink_readahead_size_eio(filp, ra); | 
| Oleg Nesterov | 8546232 | 2008-06-08 21:20:43 +0400 | [diff] [blame] | 1276 | 				error = -EIO; | 
 | 1277 | 				goto readpage_error; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1278 | 			} | 
 | 1279 | 			unlock_page(page); | 
 | 1280 | 		} | 
 | 1281 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1282 | 		goto page_ok; | 
 | 1283 |  | 
 | 1284 | readpage_error: | 
 | 1285 | 		/* UHHUH! A synchronous read error occurred. Report it */ | 
 | 1286 | 		desc->error = error; | 
 | 1287 | 		page_cache_release(page); | 
 | 1288 | 		goto out; | 
 | 1289 |  | 
 | 1290 | no_cached_page: | 
 | 1291 | 		/* | 
 | 1292 | 		 * Ok, it wasn't cached, so we need to create a new | 
 | 1293 | 		 * page. | 
 | 1294 | 		 */ | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 1295 | 		page = page_cache_alloc_cold(mapping); | 
 | 1296 | 		if (!page) { | 
 | 1297 | 			desc->error = -ENOMEM; | 
 | 1298 | 			goto out; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1299 | 		} | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 1300 | 		error = add_to_page_cache_lru(page, mapping, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1301 | 						index, GFP_KERNEL); | 
 | 1302 | 		if (error) { | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 1303 | 			page_cache_release(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1304 | 			if (error == -EEXIST) | 
 | 1305 | 				goto find_page; | 
 | 1306 | 			desc->error = error; | 
 | 1307 | 			goto out; | 
 | 1308 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1309 | 		goto readpage; | 
 | 1310 | 	} | 
 | 1311 |  | 
 | 1312 | out: | 
| Fengguang Wu | 7ff8107 | 2007-10-16 01:24:35 -0700 | [diff] [blame] | 1313 | 	ra->prev_pos = prev_index; | 
 | 1314 | 	ra->prev_pos <<= PAGE_CACHE_SHIFT; | 
 | 1315 | 	ra->prev_pos |= prev_offset; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1316 |  | 
| Fengguang Wu | f4e6b49 | 2007-10-16 01:24:33 -0700 | [diff] [blame] | 1317 | 	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset; | 
| Krishna Kumar | 0c6aa26 | 2008-10-15 22:01:13 -0700 | [diff] [blame] | 1318 | 	file_accessed(filp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1319 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1320 |  | 
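/*
 * Worked example of the position arithmetic above, assuming 4kB pages
 * (PAGE_CACHE_SHIFT == 12): for *ppos == 5000, index = 5000 >> 12 == 1
 * and offset = 5000 & 4095 == 904, i.e. byte 904 of the second pagecache
 * page; on exit the loop recombines them as (index << 12) + offset.
 */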
 | 1321 | int file_read_actor(read_descriptor_t *desc, struct page *page, | 
 | 1322 | 			unsigned long offset, unsigned long size) | 
 | 1323 | { | 
 | 1324 | 	char *kaddr; | 
 | 1325 | 	unsigned long left, count = desc->count; | 
 | 1326 |  | 
 | 1327 | 	if (size > count) | 
 | 1328 | 		size = count; | 
 | 1329 |  | 
 | 1330 | 	/* | 
 | 1331 | 	 * Faults on the destination of a read are common, so do it before | 
 | 1332 | 	 * taking the kmap. | 
 | 1333 | 	 */ | 
 | 1334 | 	if (!fault_in_pages_writeable(desc->arg.buf, size)) { | 
 | 1335 | 		kaddr = kmap_atomic(page, KM_USER0); | 
 | 1336 | 		left = __copy_to_user_inatomic(desc->arg.buf, | 
 | 1337 | 						kaddr + offset, size); | 
 | 1338 | 		kunmap_atomic(kaddr, KM_USER0); | 
 | 1339 | 		if (left == 0) | 
 | 1340 | 			goto success; | 
 | 1341 | 	} | 
 | 1342 |  | 
 | 1343 | 	/* Do it the slow way */ | 
 | 1344 | 	kaddr = kmap(page); | 
 | 1345 | 	left = __copy_to_user(desc->arg.buf, kaddr + offset, size); | 
 | 1346 | 	kunmap(page); | 
 | 1347 |  | 
 | 1348 | 	if (left) { | 
 | 1349 | 		size -= left; | 
 | 1350 | 		desc->error = -EFAULT; | 
 | 1351 | 	} | 
 | 1352 | success: | 
 | 1353 | 	desc->count = count - size; | 
 | 1354 | 	desc->written += size; | 
 | 1355 | 	desc->arg.buf += size; | 
 | 1356 | 	return size; | 
 | 1357 | } | 
 | 1358 |  | 
| Dmitriy Monakhov | 0ceb331 | 2007-05-08 00:23:02 -0700 | [diff] [blame] | 1359 | /** | 
 | 1360 |  * generic_segment_checks - perform sanity checks on an iovec before I/O | 
 | 1361 |  * @iov:	io vector request | 
 | 1362 |  * @nr_segs:	number of segments in the iovec | 
 | 1363 |  * @count:	number of bytes to transfer (adjusted on return) | 
 | 1364 |  * @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE | 
 | 1365 |  * | 
 | 1366 |  * Adjusts the number of segments and the amount of bytes to transfer (nr_segs | 
 | 1367 |  * should be properly initialized first). Returns the appropriate error code | 
 | 1368 |  * that the caller should return, or zero if the access should be allowed. | 
 | 1369 |  */ | 
 | 1370 | int generic_segment_checks(const struct iovec *iov, | 
 | 1371 | 			unsigned long *nr_segs, size_t *count, int access_flags) | 
 | 1372 | { | 
 | 1373 | 	unsigned long   seg; | 
 | 1374 | 	size_t cnt = 0; | 
 | 1375 | 	for (seg = 0; seg < *nr_segs; seg++) { | 
 | 1376 | 		const struct iovec *iv = &iov[seg]; | 
 | 1377 |  | 
 | 1378 | 		/* | 
 | 1379 | 		 * If any segment has a negative length, or the cumulative | 
 | 1380 | 		 * length ever wraps negative then return -EINVAL. | 
 | 1381 | 		 */ | 
 | 1382 | 		cnt += iv->iov_len; | 
 | 1383 | 		if (unlikely((ssize_t)(cnt|iv->iov_len) < 0)) | 
 | 1384 | 			return -EINVAL; | 
 | 1385 | 		if (access_ok(access_flags, iv->iov_base, iv->iov_len)) | 
 | 1386 | 			continue; | 
 | 1387 | 		if (seg == 0) | 
 | 1388 | 			return -EFAULT; | 
 | 1389 | 		*nr_segs = seg; | 
 | 1390 | 		cnt -= iv->iov_len;	/* This segment is no good */ | 
 | 1391 | 		break; | 
 | 1392 | 	} | 
 | 1393 | 	*count = cnt; | 
 | 1394 | 	return 0; | 
 | 1395 | } | 
 | 1396 | EXPORT_SYMBOL(generic_segment_checks); | 
 | 1397 |  | 
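/*
 * Sketch of typical use (illustrative only): the read path verifies the
 * user buffers are writable before copying into them, as
 * generic_file_aio_read() does below:
 *
 *	size_t count;
 *	int err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
 *	if (err)
 *		return err;
 *	// count now holds the usable byte total; nr_segs may have been
 *	// trimmed to the last fully accessible segment
 *
 * The write path performs the mirror-image check with VERIFY_READ.
 */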
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 1398 | /** | 
| Henrik Kretzschmar | b2abacf | 2006-10-04 02:15:22 -0700 | [diff] [blame] | 1399 |  * generic_file_aio_read - generic filesystem read routine | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 1400 |  * @iocb:	kernel I/O control block | 
 | 1401 |  * @iov:	io vector request | 
 | 1402 |  * @nr_segs:	number of segments in the iovec | 
| Henrik Kretzschmar | b2abacf | 2006-10-04 02:15:22 -0700 | [diff] [blame] | 1403 |  * @pos:	current file position | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 1404 |  * | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1405 |  * This is the "read()" routine for all filesystems | 
 | 1406 |  * that can use the page cache directly. | 
 | 1407 |  */ | 
 | 1408 | ssize_t | 
| Badari Pulavarty | 543ade1 | 2006-09-30 23:28:48 -0700 | [diff] [blame] | 1409 | generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov, | 
 | 1410 | 		unsigned long nr_segs, loff_t pos) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1411 | { | 
 | 1412 | 	struct file *filp = iocb->ki_filp; | 
 | 1413 | 	ssize_t retval; | 
| Josef Bacik | 66f998f | 2010-05-23 11:00:54 -0400 | [diff] [blame] | 1414 | 	unsigned long seg = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1415 | 	size_t count; | 
| Badari Pulavarty | 543ade1 | 2006-09-30 23:28:48 -0700 | [diff] [blame] | 1416 | 	loff_t *ppos = &iocb->ki_pos; | 
| Jens Axboe | 55602dd | 2010-06-24 15:05:37 +0200 | [diff] [blame] | 1417 | 	struct blk_plug plug; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1418 |  | 
 | 1419 | 	count = 0; | 
| Dmitriy Monakhov | 0ceb331 | 2007-05-08 00:23:02 -0700 | [diff] [blame] | 1420 | 	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE); | 
 | 1421 | 	if (retval) | 
 | 1422 | 		return retval; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1423 |  | 
| Jens Axboe | 55602dd | 2010-06-24 15:05:37 +0200 | [diff] [blame] | 1424 | 	blk_start_plug(&plug); | 
 | 1425 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1426 | 	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */ | 
 | 1427 | 	if (filp->f_flags & O_DIRECT) { | 
| Badari Pulavarty | 543ade1 | 2006-09-30 23:28:48 -0700 | [diff] [blame] | 1428 | 		loff_t size; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1429 | 		struct address_space *mapping; | 
 | 1430 | 		struct inode *inode; | 
 | 1431 |  | 
 | 1432 | 		mapping = filp->f_mapping; | 
 | 1433 | 		inode = mapping->host; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1434 | 		if (!count) | 
 | 1435 | 			goto out; /* skip atime */ | 
 | 1436 | 		size = i_size_read(inode); | 
 | 1437 | 		if (pos < size) { | 
| Nick Piggin | 48b47c5 | 2009-01-06 14:40:22 -0800 | [diff] [blame] | 1438 | 			retval = filemap_write_and_wait_range(mapping, pos, | 
 | 1439 | 					pos + iov_length(iov, nr_segs) - 1); | 
| Christoph Hellwig | a969e90 | 2008-07-23 21:27:04 -0700 | [diff] [blame] | 1440 | 			if (!retval) { | 
 | 1441 | 				retval = mapping->a_ops->direct_IO(READ, iocb, | 
 | 1442 | 							iov, pos, nr_segs); | 
 | 1443 | 			} | 
| Josef Bacik | 66f998f | 2010-05-23 11:00:54 -0400 | [diff] [blame] | 1444 | 			if (retval > 0) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1445 | 				*ppos = pos + retval; | 
| Josef Bacik | 66f998f | 2010-05-23 11:00:54 -0400 | [diff] [blame] | 1446 | 				count -= retval; | 
 | 1447 | 			} | 
 | 1448 |  | 
 | 1449 | 			/* | 
 | 1450 | 			 * Btrfs can have a short DIO read if we encounter | 
 | 1451 | 			 * compressed extents, so if there was an error, or if | 
 | 1452 | 			 * we've already read everything we wanted to, or if | 
 | 1453 | 			 * there was a short read because we hit EOF, go ahead | 
 | 1454 | 			 * and return.  Otherwise fallthrough to buffered io for | 
 | 1455 | 			 * the rest of the read. | 
 | 1456 | 			 */ | 
 | 1457 | 			if (retval < 0 || !count || *ppos >= size) { | 
| Hugh Dickins | 11fa977 | 2008-07-23 21:27:34 -0700 | [diff] [blame] | 1458 | 				file_accessed(filp); | 
 | 1459 | 				goto out; | 
 | 1460 | 			} | 
| Steven Whitehouse | 0e0bcae | 2006-09-27 14:45:07 -0400 | [diff] [blame] | 1461 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1462 | 	} | 
 | 1463 |  | 
| Josef Bacik | 66f998f | 2010-05-23 11:00:54 -0400 | [diff] [blame] | 1464 | 	count = retval; | 
| Hugh Dickins | 11fa977 | 2008-07-23 21:27:34 -0700 | [diff] [blame] | 1465 | 	for (seg = 0; seg < nr_segs; seg++) { | 
 | 1466 | 		read_descriptor_t desc; | 
| Josef Bacik | 66f998f | 2010-05-23 11:00:54 -0400 | [diff] [blame] | 1467 | 		loff_t offset = 0; | 
 | 1468 |  | 
 | 1469 | 		/* | 
 | 1470 | 		 * If we did a short DIO read we need to skip the section of the | 
 | 1471 | 		 * iov that we've already read data into. | 
 | 1472 | 		 */ | 
 | 1473 | 		if (count) { | 
 | 1474 | 			if (count > iov[seg].iov_len) { | 
 | 1475 | 				count -= iov[seg].iov_len; | 
 | 1476 | 				continue; | 
 | 1477 | 			} | 
 | 1478 | 			offset = count; | 
 | 1479 | 			count = 0; | 
 | 1480 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1481 |  | 
| Hugh Dickins | 11fa977 | 2008-07-23 21:27:34 -0700 | [diff] [blame] | 1482 | 		desc.written = 0; | 
| Josef Bacik | 66f998f | 2010-05-23 11:00:54 -0400 | [diff] [blame] | 1483 | 		desc.arg.buf = iov[seg].iov_base + offset; | 
 | 1484 | 		desc.count = iov[seg].iov_len - offset; | 
| Hugh Dickins | 11fa977 | 2008-07-23 21:27:34 -0700 | [diff] [blame] | 1485 | 		if (desc.count == 0) | 
 | 1486 | 			continue; | 
 | 1487 | 		desc.error = 0; | 
 | 1488 | 		do_generic_file_read(filp, ppos, &desc, file_read_actor); | 
 | 1489 | 		retval += desc.written; | 
 | 1490 | 		if (desc.error) { | 
 | 1491 | 			retval = retval ?: desc.error; | 
 | 1492 | 			break; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1493 | 		} | 
| Hugh Dickins | 11fa977 | 2008-07-23 21:27:34 -0700 | [diff] [blame] | 1494 | 		if (desc.count > 0) | 
 | 1495 | 			break; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1496 | 	} | 
 | 1497 | out: | 
| Jens Axboe | 55602dd | 2010-06-24 15:05:37 +0200 | [diff] [blame] | 1498 | 	blk_finish_plug(&plug); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1499 | 	return retval; | 
 | 1500 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1501 | EXPORT_SYMBOL(generic_file_aio_read); | 
 | 1502 |  | 
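/*
 * Filesystems that go through the pagecache typically wire this routine
 * straight into their file_operations; a minimal sketch (example_file_ops
 * is hypothetical, the other symbols are real):
 *
 *	const struct file_operations example_file_ops = {
 *		.read		= do_sync_read,
 *		.aio_read	= generic_file_aio_read,
 *		.mmap		= generic_file_mmap,
 *	};
 *
 * do_sync_read() then funnels plain read(2) calls into this aio path.
 */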
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1503 | static ssize_t | 
 | 1504 | do_readahead(struct address_space *mapping, struct file *filp, | 
| Fengguang Wu | 57f6b96 | 2007-10-16 01:24:37 -0700 | [diff] [blame] | 1505 | 	     pgoff_t index, unsigned long nr) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1506 | { | 
 | 1507 | 	if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage) | 
 | 1508 | 		return -EINVAL; | 
 | 1509 |  | 
| Wu Fengguang | f7e839d | 2009-06-16 15:31:20 -0700 | [diff] [blame] | 1510 | 	force_page_cache_readahead(mapping, filp, index, nr); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1511 | 	return 0; | 
 | 1512 | } | 
 | 1513 |  | 
| Heiko Carstens | 6673e0c | 2009-01-14 14:14:02 +0100 | [diff] [blame] | 1514 | SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1515 | { | 
 | 1516 | 	ssize_t ret; | 
 | 1517 | 	struct file *file; | 
 | 1518 |  | 
 | 1519 | 	ret = -EBADF; | 
 | 1520 | 	file = fget(fd); | 
 | 1521 | 	if (file) { | 
 | 1522 | 		if (file->f_mode & FMODE_READ) { | 
 | 1523 | 			struct address_space *mapping = file->f_mapping; | 
| Fengguang Wu | 57f6b96 | 2007-10-16 01:24:37 -0700 | [diff] [blame] | 1524 | 			pgoff_t start = offset >> PAGE_CACHE_SHIFT; | 
 | 1525 | 			pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1526 | 			unsigned long len = end - start + 1; | 
 | 1527 | 			ret = do_readahead(mapping, file, start, len); | 
 | 1528 | 		} | 
 | 1529 | 		fput(file); | 
 | 1530 | 	} | 
 | 1531 | 	return ret; | 
 | 1532 | } | 
| Heiko Carstens | 6673e0c | 2009-01-14 14:14:02 +0100 | [diff] [blame] | 1533 | #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS | 
 | 1534 | asmlinkage long SyS_readahead(long fd, loff_t offset, long count) | 
 | 1535 | { | 
 | 1536 | 	return SYSC_readahead((int) fd, offset, (size_t) count); | 
 | 1537 | } | 
 | 1538 | SYSCALL_ALIAS(sys_readahead, SyS_readahead); | 
 | 1539 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1540 |  | 
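/*
 * From userspace the syscall above is reached through the glibc wrapper;
 * an illustrative snippet (requires _GNU_SOURCE):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	// hint that the first megabyte of fd will be needed soon
 *	if (readahead(fd, 0, 1024 * 1024) == -1)
 *		perror("readahead");
 *
 * The call may block reading filesystem metadata, but it does not wait
 * for the data I/O itself to complete: success means the reads were
 * scheduled, not that the data is already cached.
 */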
 | 1541 | #ifdef CONFIG_MMU | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 1542 | /** | 
 | 1543 |  * page_cache_read - adds requested page to the page cache if not already there | 
 | 1544 |  * @file:	file to read | 
 | 1545 |  * @offset:	page index | 
 | 1546 |  * | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1547 |  * This adds the requested page to the page cache if it isn't already there, | 
 | 1548 |  * and schedules an I/O to read in its contents from disk. | 
 | 1549 |  */ | 
| Harvey Harrison | 920c7a5 | 2008-02-04 22:29:26 -0800 | [diff] [blame] | 1550 | static int page_cache_read(struct file *file, pgoff_t offset) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1551 | { | 
 | 1552 | 	struct address_space *mapping = file->f_mapping; | 
 | 1553 | 	struct page *page;  | 
| Zach Brown | 994fc28c | 2005-12-15 14:28:17 -0800 | [diff] [blame] | 1554 | 	int ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1555 |  | 
| Zach Brown | 994fc28c | 2005-12-15 14:28:17 -0800 | [diff] [blame] | 1556 | 	do { | 
 | 1557 | 		page = page_cache_alloc_cold(mapping); | 
 | 1558 | 		if (!page) | 
 | 1559 | 			return -ENOMEM; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1560 |  | 
| Zach Brown | 994fc28c | 2005-12-15 14:28:17 -0800 | [diff] [blame] | 1561 | 		ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL); | 
 | 1562 | 		if (ret == 0) | 
 | 1563 | 			ret = mapping->a_ops->readpage(file, page); | 
 | 1564 | 		else if (ret == -EEXIST) | 
 | 1565 | 			ret = 0; /* losing race to add is OK */ | 
 | 1566 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1567 | 		page_cache_release(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1568 |  | 
| Zach Brown | 994fc28c | 2005-12-15 14:28:17 -0800 | [diff] [blame] | 1569 | 	} while (ret == AOP_TRUNCATED_PAGE); | 
 | 1570 |  | 
 | 1571 | 	return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1572 | } | 
 | 1573 |  | 
 | 1574 | #define MMAP_LOTSAMISS  (100) | 
 | 1575 |  | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 1576 | /* | 
 | 1577 |  * Synchronous readahead happens when we don't even find | 
 | 1578 |  * a page in the page cache at all. | 
 | 1579 |  */ | 
 | 1580 | static void do_sync_mmap_readahead(struct vm_area_struct *vma, | 
 | 1581 | 				   struct file_ra_state *ra, | 
 | 1582 | 				   struct file *file, | 
 | 1583 | 				   pgoff_t offset) | 
 | 1584 | { | 
 | 1585 | 	unsigned long ra_pages; | 
 | 1586 | 	struct address_space *mapping = file->f_mapping; | 
 | 1587 |  | 
 | 1588 | 	/* If we don't want any read-ahead, don't bother */ | 
 | 1589 | 	if (VM_RandomReadHint(vma)) | 
 | 1590 | 		return; | 
| Wu Fengguang | 275b12b | 2011-05-24 17:12:28 -0700 | [diff] [blame] | 1591 | 	if (!ra->ra_pages) | 
 | 1592 | 		return; | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 1593 |  | 
| Wu Fengguang | 2cbea1d3a | 2011-05-24 17:12:30 -0700 | [diff] [blame] | 1594 | 	if (VM_SequentialReadHint(vma)) { | 
| Wu Fengguang | 7ffc59b | 2009-06-16 15:31:38 -0700 | [diff] [blame] | 1595 | 		page_cache_sync_readahead(mapping, ra, file, offset, | 
 | 1596 | 					  ra->ra_pages); | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 1597 | 		return; | 
 | 1598 | 	} | 
 | 1599 |  | 
| Andi Kleen | 207d04b | 2011-05-24 17:12:29 -0700 | [diff] [blame] | 1600 | 	/* Avoid banging the cache line if not needed */ | 
 | 1601 | 	if (ra->mmap_miss < MMAP_LOTSAMISS * 10) | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 1602 | 		ra->mmap_miss++; | 
 | 1603 |  | 
 | 1604 | 	/* | 
 | 1605 | 	 * Do we miss much more than hit in this file? If so, | 
 | 1606 | 	 * stop bothering with read-ahead. It will only hurt. | 
 | 1607 | 	 */ | 
 | 1608 | 	if (ra->mmap_miss > MMAP_LOTSAMISS) | 
 | 1609 | 		return; | 
 | 1610 |  | 
| Wu Fengguang | d30a110 | 2009-06-16 15:31:30 -0700 | [diff] [blame] | 1611 | 	/* | 
 | 1612 | 	 * mmap read-around | 
 | 1613 | 	 */ | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 1614 | 	ra_pages = max_sane_readahead(ra->ra_pages); | 
| Wu Fengguang | 275b12b | 2011-05-24 17:12:28 -0700 | [diff] [blame] | 1615 | 	ra->start = max_t(long, 0, offset - ra_pages / 2); | 
 | 1616 | 	ra->size = ra_pages; | 
| Wu Fengguang | 2cbea1d3a | 2011-05-24 17:12:30 -0700 | [diff] [blame] | 1617 | 	ra->async_size = ra_pages / 4; | 
| Wu Fengguang | 275b12b | 2011-05-24 17:12:28 -0700 | [diff] [blame] | 1618 | 	ra_submit(ra, mapping, file); | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 1619 | } | 
 | 1620 |  | 
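/*
 * Worked example of the read-around window above: with ra_pages == 32 and
 * a fault at page offset 100, ra->start = max(0, 100 - 16) == 84,
 * ra->size == 32 and ra->async_size == 8, i.e. pages 84-115 are read
 * around the fault, with async readahead re-armed 8 pages before the
 * window ends.
 */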
 | 1621 | /* | 
 | 1622 |  * Asynchronous readahead happens when we find the page with PG_readahead | 
 | 1623 |  * set, so we may want to extend the readahead further. | 
 | 1624 |  */ | 
 | 1625 | static void do_async_mmap_readahead(struct vm_area_struct *vma, | 
 | 1626 | 				    struct file_ra_state *ra, | 
 | 1627 | 				    struct file *file, | 
 | 1628 | 				    struct page *page, | 
 | 1629 | 				    pgoff_t offset) | 
 | 1630 | { | 
 | 1631 | 	struct address_space *mapping = file->f_mapping; | 
 | 1632 |  | 
 | 1633 | 	/* If we don't want any read-ahead, don't bother */ | 
 | 1634 | 	if (VM_RandomReadHint(vma)) | 
 | 1635 | 		return; | 
 | 1636 | 	if (ra->mmap_miss > 0) | 
 | 1637 | 		ra->mmap_miss--; | 
 | 1638 | 	if (PageReadahead(page)) | 
| Wu Fengguang | 2fad6f5 | 2009-06-16 15:31:29 -0700 | [diff] [blame] | 1639 | 		page_cache_async_readahead(mapping, ra, file, | 
 | 1640 | 					   page, offset, ra->ra_pages); | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 1641 | } | 
 | 1642 |  | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 1643 | /** | 
| Nick Piggin | 54cb882 | 2007-07-19 01:46:59 -0700 | [diff] [blame] | 1644 |  * filemap_fault - read in file data for page fault handling | 
| Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 1645 |  * @vma:	vma in which the fault was taken | 
 | 1646 |  * @vmf:	struct vm_fault containing details of the fault | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 1647 |  * | 
| Nick Piggin | 54cb882 | 2007-07-19 01:46:59 -0700 | [diff] [blame] | 1648 |  * filemap_fault() is invoked via the vma operations vector for a | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1649 |  * mapped memory region to read in file data during a page fault. | 
 | 1650 |  * | 
 | 1651 |  * The gotos are kind of ugly, but this streamlines the normal case of having | 
 | 1652 |  * it in the page cache, and handles the special cases reasonably without | 
 | 1653 |  * having a lot of duplicated code. | 
 | 1654 |  */ | 
| Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 1655 | int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1656 | { | 
 | 1657 | 	int error; | 
| Nick Piggin | 54cb882 | 2007-07-19 01:46:59 -0700 | [diff] [blame] | 1658 | 	struct file *file = vma->vm_file; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1659 | 	struct address_space *mapping = file->f_mapping; | 
 | 1660 | 	struct file_ra_state *ra = &file->f_ra; | 
 | 1661 | 	struct inode *inode = mapping->host; | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 1662 | 	pgoff_t offset = vmf->pgoff; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1663 | 	struct page *page; | 
| Jan Kara | 2004dc8 | 2008-02-08 04:20:11 -0800 | [diff] [blame] | 1664 | 	pgoff_t size; | 
| Nick Piggin | 83c5407 | 2007-07-19 01:47:05 -0700 | [diff] [blame] | 1665 | 	int ret = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1666 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1667 | 	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 1668 | 	if (offset >= size) | 
| Linus Torvalds | 5307cc1 | 2007-10-31 09:19:46 -0700 | [diff] [blame] | 1669 | 		return VM_FAULT_SIGBUS; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1670 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1671 | 	/* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1672 | 	 * Do we have something in the page cache already? | 
 | 1673 | 	 */ | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 1674 | 	page = find_get_page(mapping, offset); | 
 | 1675 | 	if (likely(page)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1676 | 		/* | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 1677 | 		 * We found the page, so try async readahead before | 
 | 1678 | 		 * waiting for the lock. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1679 | 		 */ | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 1680 | 		do_async_mmap_readahead(vma, ra, file, page, offset); | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 1681 | 	} else { | 
 | 1682 | 		/* No page in the page cache at all */ | 
 | 1683 | 		do_sync_mmap_readahead(vma, ra, file, offset); | 
 | 1684 | 		count_vm_event(PGMAJFAULT); | 
| Ying Han | 456f998 | 2011-05-26 16:25:38 -0700 | [diff] [blame] | 1685 | 		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 1686 | 		ret = VM_FAULT_MAJOR; | 
 | 1687 | retry_find: | 
| Michel Lespinasse | b522c94 | 2010-10-26 14:21:56 -0700 | [diff] [blame] | 1688 | 		page = find_get_page(mapping, offset); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1689 | 		if (!page) | 
 | 1690 | 			goto no_cached_page; | 
 | 1691 | 	} | 
 | 1692 |  | 
| Michel Lespinasse | d88c092 | 2010-11-02 13:05:18 -0700 | [diff] [blame] | 1693 | 	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) { | 
 | 1694 | 		page_cache_release(page); | 
| Michel Lespinasse | d065bd8 | 2010-10-26 14:21:57 -0700 | [diff] [blame] | 1695 | 		return ret | VM_FAULT_RETRY; | 
| Michel Lespinasse | d88c092 | 2010-11-02 13:05:18 -0700 | [diff] [blame] | 1696 | 	} | 
| Michel Lespinasse | b522c94 | 2010-10-26 14:21:56 -0700 | [diff] [blame] | 1697 |  | 
 | 1698 | 	/* Did it get truncated? */ | 
 | 1699 | 	if (unlikely(page->mapping != mapping)) { | 
 | 1700 | 		unlock_page(page); | 
 | 1701 | 		put_page(page); | 
 | 1702 | 		goto retry_find; | 
 | 1703 | 	} | 
 | 1704 | 	VM_BUG_ON(page->index != offset); | 
 | 1705 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1706 | 	/* | 
| Nick Piggin | d00806b | 2007-07-19 01:46:57 -0700 | [diff] [blame] | 1707 | 	 * We have a locked page in the page cache, now we need to check | 
 | 1708 | 	 * that it's up-to-date. If not, it is going to be due to an error. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1709 | 	 */ | 
| Nick Piggin | d00806b | 2007-07-19 01:46:57 -0700 | [diff] [blame] | 1710 | 	if (unlikely(!PageUptodate(page))) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1711 | 		goto page_not_uptodate; | 
 | 1712 |  | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 1713 | 	/* | 
 | 1714 | 	 * Found the page and have a reference on it. | 
 | 1715 | 	 * We must recheck i_size under page lock. | 
 | 1716 | 	 */ | 
| Nick Piggin | d00806b | 2007-07-19 01:46:57 -0700 | [diff] [blame] | 1717 | 	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 1718 | 	if (unlikely(offset >= size)) { | 
| Nick Piggin | d00806b | 2007-07-19 01:46:57 -0700 | [diff] [blame] | 1719 | 		unlock_page(page); | 
| Yan Zheng | 745ad48 | 2007-10-08 10:08:37 -0700 | [diff] [blame] | 1720 | 		page_cache_release(page); | 
| Linus Torvalds | 5307cc1 | 2007-10-31 09:19:46 -0700 | [diff] [blame] | 1721 | 		return VM_FAULT_SIGBUS; | 
| Nick Piggin | d00806b | 2007-07-19 01:46:57 -0700 | [diff] [blame] | 1722 | 	} | 
 | 1723 |  | 
| Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 1724 | 	vmf->page = page; | 
| Nick Piggin | 83c5407 | 2007-07-19 01:47:05 -0700 | [diff] [blame] | 1725 | 	return ret | VM_FAULT_LOCKED; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1726 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1727 | no_cached_page: | 
 | 1728 | 	/* | 
 | 1729 | 	 * We're only likely to ever get here if MADV_RANDOM is in | 
 | 1730 | 	 * effect. | 
 | 1731 | 	 */ | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 1732 | 	error = page_cache_read(file, offset); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1733 |  | 
 | 1734 | 	/* | 
 | 1735 | 	 * The page we want has now been added to the page cache. | 
 | 1736 | 	 * In the unlikely event that someone removed it in the | 
 | 1737 | 	 * meantime, we'll just come back here and read it again. | 
 | 1738 | 	 */ | 
 | 1739 | 	if (error >= 0) | 
 | 1740 | 		goto retry_find; | 
 | 1741 |  | 
 | 1742 | 	/* | 
 | 1743 | 	 * An error return from page_cache_read can occur if the | 
 | 1744 | 	 * system is low on memory, or a problem occurs while trying | 
 | 1745 | 	 * to schedule I/O. | 
 | 1746 | 	 */ | 
 | 1747 | 	if (error == -ENOMEM) | 
| Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 1748 | 		return VM_FAULT_OOM; | 
 | 1749 | 	return VM_FAULT_SIGBUS; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1750 |  | 
 | 1751 | page_not_uptodate: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1752 | 	/* | 
 | 1753 | 	 * Umm, take care of errors if the page isn't up-to-date. | 
 | 1754 | 	 * Try to re-read it _once_. We do this synchronously, | 
 | 1755 | 	 * because there really aren't any performance issues here | 
 | 1756 | 	 * and we need to check for errors. | 
 | 1757 | 	 */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1758 | 	ClearPageError(page); | 
| Zach Brown | 994fc28c | 2005-12-15 14:28:17 -0800 | [diff] [blame] | 1759 | 	error = mapping->a_ops->readpage(file, page); | 
| Miklos Szeredi | 3ef0f72 | 2008-05-14 16:05:37 -0700 | [diff] [blame] | 1760 | 	if (!error) { | 
 | 1761 | 		wait_on_page_locked(page); | 
 | 1762 | 		if (!PageUptodate(page)) | 
 | 1763 | 			error = -EIO; | 
 | 1764 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1765 | 	page_cache_release(page); | 
| Nick Piggin | d00806b | 2007-07-19 01:46:57 -0700 | [diff] [blame] | 1766 |  | 
 | 1767 | 	if (!error || error == AOP_TRUNCATED_PAGE) | 
 | 1768 | 		goto retry_find; | 
 | 1769 |  | 
 | 1770 | 	/* Things didn't work out. Return zero to tell the mm layer so. */ | 
 | 1771 | 	shrink_readahead_size_eio(file, ra); | 
| Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 1772 | 	return VM_FAULT_SIGBUS; | 
| Nick Piggin | 54cb882 | 2007-07-19 01:46:59 -0700 | [diff] [blame] | 1773 | } | 
 | 1774 | EXPORT_SYMBOL(filemap_fault); | 
 | 1775 |  | 
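/*
 * Sketch (illustrative only): a filesystem wanting extra fault-time work,
 * e.g. its own page_mkwrite, can still reuse filemap_fault directly;
 * example_vm_ops and example_page_mkwrite are hypothetical:
 *
 *	static const struct vm_operations_struct example_vm_ops = {
 *		.fault		= filemap_fault,
 *		.page_mkwrite	= example_page_mkwrite,
 *	};
 */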
| Alexey Dobriyan | f0f37e2 | 2009-09-27 22:29:37 +0400 | [diff] [blame] | 1776 | const struct vm_operations_struct generic_file_vm_ops = { | 
| Nick Piggin | 54cb882 | 2007-07-19 01:46:59 -0700 | [diff] [blame] | 1777 | 	.fault		= filemap_fault, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1778 | }; | 
 | 1779 |  | 
 | 1780 | /* This is used for a general mmap of a disk file */ | 
 | 1781 |  | 
 | 1782 | int generic_file_mmap(struct file * file, struct vm_area_struct * vma) | 
 | 1783 | { | 
 | 1784 | 	struct address_space *mapping = file->f_mapping; | 
 | 1785 |  | 
 | 1786 | 	if (!mapping->a_ops->readpage) | 
 | 1787 | 		return -ENOEXEC; | 
 | 1788 | 	file_accessed(file); | 
 | 1789 | 	vma->vm_ops = &generic_file_vm_ops; | 
| Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 1790 | 	vma->vm_flags |= VM_CAN_NONLINEAR; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1791 | 	return 0; | 
 | 1792 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1793 |  | 
 | 1794 | /* | 
 | 1795 |  * This is for filesystems which do not implement ->writepage. | 
 | 1796 |  */ | 
 | 1797 | int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma) | 
 | 1798 | { | 
 | 1799 | 	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) | 
 | 1800 | 		return -EINVAL; | 
 | 1801 | 	return generic_file_mmap(file, vma); | 
 | 1802 | } | 
 | 1803 | #else | 
 | 1804 | int generic_file_mmap(struct file * file, struct vm_area_struct * vma) | 
 | 1805 | { | 
 | 1806 | 	return -ENOSYS; | 
 | 1807 | } | 
 | 1808 | int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma) | 
 | 1809 | { | 
 | 1810 | 	return -ENOSYS; | 
 | 1811 | } | 
 | 1812 | #endif /* CONFIG_MMU */ | 
 | 1813 |  | 
 | 1814 | EXPORT_SYMBOL(generic_file_mmap); | 
 | 1815 | EXPORT_SYMBOL(generic_file_readonly_mmap); | 
 | 1816 |  | 
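/*
 * Sketch (illustrative only): a read-only filesystem with no ->writepage
 * would publish the readonly variant so writable shared mappings are
 * refused up front; example_ro_fops is hypothetical:
 *
 *	const struct file_operations example_ro_fops = {
 *		.read		= do_sync_read,
 *		.aio_read	= generic_file_aio_read,
 *		.mmap		= generic_file_readonly_mmap,
 *	};
 */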
| Nick Piggin | 6fe6900 | 2007-05-06 14:49:04 -0700 | [diff] [blame] | 1817 | static struct page *__read_cache_page(struct address_space *mapping, | 
| Fengguang Wu | 57f6b96 | 2007-10-16 01:24:37 -0700 | [diff] [blame] | 1818 | 				pgoff_t index, | 
| Hugh Dickins | 5e5358e | 2011-07-25 17:12:23 -0700 | [diff] [blame] | 1819 | 				int (*filler)(void *, struct page *), | 
| Linus Torvalds | 0531b2a | 2010-01-27 09:20:03 -0800 | [diff] [blame] | 1820 | 				void *data, | 
 | 1821 | 				gfp_t gfp) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1822 | { | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 1823 | 	struct page *page; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1824 | 	int err; | 
 | 1825 | repeat: | 
 | 1826 | 	page = find_get_page(mapping, index); | 
 | 1827 | 	if (!page) { | 
| Linus Torvalds | 0531b2a | 2010-01-27 09:20:03 -0800 | [diff] [blame] | 1828 | 		page = __page_cache_alloc(gfp | __GFP_COLD); | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 1829 | 		if (!page) | 
 | 1830 | 			return ERR_PTR(-ENOMEM); | 
 | 1831 | 		err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL); | 
 | 1832 | 		if (unlikely(err)) { | 
 | 1833 | 			page_cache_release(page); | 
 | 1834 | 			if (err == -EEXIST) | 
 | 1835 | 				goto repeat; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1836 | 			/* Presumably ENOMEM for radix tree node */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1837 | 			return ERR_PTR(err); | 
 | 1838 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1839 | 		err = filler(data, page); | 
 | 1840 | 		if (err < 0) { | 
 | 1841 | 			page_cache_release(page); | 
 | 1842 | 			page = ERR_PTR(err); | 
 | 1843 | 		} | 
 | 1844 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1845 | 	return page; | 
 | 1846 | } | 
 | 1847 |  | 
| Linus Torvalds | 0531b2a | 2010-01-27 09:20:03 -0800 | [diff] [blame] | 1848 | static struct page *do_read_cache_page(struct address_space *mapping, | 
| Fengguang Wu | 57f6b96 | 2007-10-16 01:24:37 -0700 | [diff] [blame] | 1849 | 				pgoff_t index, | 
| Hugh Dickins | 5e5358e | 2011-07-25 17:12:23 -0700 | [diff] [blame] | 1850 | 				int (*filler)(void *, struct page *), | 
| Linus Torvalds | 0531b2a | 2010-01-27 09:20:03 -0800 | [diff] [blame] | 1851 | 				void *data, | 
 | 1852 | 				gfp_t gfp) | 
 | 1853 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1854 | { | 
 | 1855 | 	struct page *page; | 
 | 1856 | 	int err; | 
 | 1857 |  | 
 | 1858 | retry: | 
| Linus Torvalds | 0531b2a | 2010-01-27 09:20:03 -0800 | [diff] [blame] | 1859 | 	page = __read_cache_page(mapping, index, filler, data, gfp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1860 | 	if (IS_ERR(page)) | 
| David Howells | c855ff3 | 2007-05-09 13:42:20 +0100 | [diff] [blame] | 1861 | 		return page; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1862 | 	if (PageUptodate(page)) | 
 | 1863 | 		goto out; | 
 | 1864 |  | 
 | 1865 | 	lock_page(page); | 
 | 1866 | 	if (!page->mapping) { | 
 | 1867 | 		unlock_page(page); | 
 | 1868 | 		page_cache_release(page); | 
 | 1869 | 		goto retry; | 
 | 1870 | 	} | 
 | 1871 | 	if (PageUptodate(page)) { | 
 | 1872 | 		unlock_page(page); | 
 | 1873 | 		goto out; | 
 | 1874 | 	} | 
 | 1875 | 	err = filler(data, page); | 
 | 1876 | 	if (err < 0) { | 
 | 1877 | 		page_cache_release(page); | 
| David Howells | c855ff3 | 2007-05-09 13:42:20 +0100 | [diff] [blame] | 1878 | 		return ERR_PTR(err); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1879 | 	} | 
| David Howells | c855ff3 | 2007-05-09 13:42:20 +0100 | [diff] [blame] | 1880 | out: | 
| Nick Piggin | 6fe6900 | 2007-05-06 14:49:04 -0700 | [diff] [blame] | 1881 | 	mark_page_accessed(page); | 
 | 1882 | 	return page; | 
 | 1883 | } | 
| Linus Torvalds | 0531b2a | 2010-01-27 09:20:03 -0800 | [diff] [blame] | 1884 |  | 
 | 1885 | /** | 
 | 1886 |  * read_cache_page_async - read into page cache, fill it if needed | 
 | 1887 |  * @mapping:	the page's address_space | 
 | 1888 |  * @index:	the page index | 
 | 1889 |  * @filler:	function to perform the read | 
| Hugh Dickins | 5e5358e | 2011-07-25 17:12:23 -0700 | [diff] [blame] | 1890 |  * @data:	first arg to filler(data, page) function, often left as NULL | 
| Linus Torvalds | 0531b2a | 2010-01-27 09:20:03 -0800 | [diff] [blame] | 1891 |  * | 
 | 1892 |  * Same as read_cache_page, but don't wait for page to become unlocked | 
 | 1893 |  * after submitting it to the filler. | 
 | 1894 |  * | 
 | 1895 |  * Read into the page cache. If a page already exists, and PageUptodate() is | 
 | 1896 |  * not set, try to fill the page but don't wait for it to become unlocked. | 
 | 1897 |  * | 
 | 1898 |  * If the page does not get brought uptodate, return -EIO. | 
 | 1899 |  */ | 
 | 1900 | struct page *read_cache_page_async(struct address_space *mapping, | 
 | 1901 | 				pgoff_t index, | 
| Hugh Dickins | 5e5358e | 2011-07-25 17:12:23 -0700 | [diff] [blame] | 1902 | 				int (*filler)(void *, struct page *), | 
| Linus Torvalds | 0531b2a | 2010-01-27 09:20:03 -0800 | [diff] [blame] | 1903 | 				void *data) | 
 | 1904 | { | 
 | 1905 | 	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping)); | 
 | 1906 | } | 
| Nick Piggin | 6fe6900 | 2007-05-06 14:49:04 -0700 | [diff] [blame] | 1907 | EXPORT_SYMBOL(read_cache_page_async); | 
 | 1908 |  | 
| Linus Torvalds | 0531b2a | 2010-01-27 09:20:03 -0800 | [diff] [blame] | 1909 | static struct page *wait_on_page_read(struct page *page) | 
 | 1910 | { | 
 | 1911 | 	if (!IS_ERR(page)) { | 
 | 1912 | 		wait_on_page_locked(page); | 
 | 1913 | 		if (!PageUptodate(page)) { | 
 | 1914 | 			page_cache_release(page); | 
 | 1915 | 			page = ERR_PTR(-EIO); | 
 | 1916 | 		} | 
 | 1917 | 	} | 
 | 1918 | 	return page; | 
 | 1919 | } | 
 | 1920 |  | 
 | 1921 | /** | 
 | 1922 |  * read_cache_page_gfp - read into page cache, using specified page allocation flags. | 
 | 1923 |  * @mapping:	the page's address_space | 
 | 1924 |  * @index:	the page index | 
 | 1925 |  * @gfp:	the page allocator flags to use if allocating | 
 | 1926 |  * | 
 | 1927 |  * This is the same as "read_mapping_page(mapping, index, NULL)", but with | 
 | 1928 |  * any new page allocations done using the specified allocation flags. Note | 
 | 1929 |  * that the radix tree operations will still use GFP_KERNEL, so you can't | 
 | 1930 |  * expect to do this atomically or anything like that - but you can pass in | 
 | 1931 |  * other page requirements. | 
 | 1932 |  * | 
 | 1933 |  * If the page does not get brought uptodate, return -EIO. | 
 | 1934 |  */ | 
 | 1935 | struct page *read_cache_page_gfp(struct address_space *mapping, | 
 | 1936 | 				pgoff_t index, | 
 | 1937 | 				gfp_t gfp) | 
 | 1938 | { | 
 | 1939 | 	filler_t *filler = (filler_t *)mapping->a_ops->readpage; | 
 | 1940 |  | 
 | 1941 | 	return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp)); | 
 | 1942 | } | 
 | 1943 | EXPORT_SYMBOL(read_cache_page_gfp); | 
 | 1944 |  | 
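/*
 * Example usage (sketch): a caller that must not recurse into filesystem
 * code during reclaim can read a page with GFP_NOFS.  'mapping' and
 * 'index' are whatever the caller is working against.
 */
#if 0	/* illustration only */
	page = read_cache_page_gfp(mapping, index, GFP_NOFS);
	if (IS_ERR(page))
		return PTR_ERR(page);
	/* ... use the now-uptodate page, then page_cache_release(page) ... */
#endif
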
| Nick Piggin | 6fe6900 | 2007-05-06 14:49:04 -0700 | [diff] [blame] | 1945 | /** | 
 | 1946 |  * read_cache_page - read into page cache, fill it if needed | 
 | 1947 |  * @mapping:	the page's address_space | 
 | 1948 |  * @index:	the page index | 
 | 1949 |  * @filler:	function to perform the read | 
| Hugh Dickins | 5e5358e | 2011-07-25 17:12:23 -0700 | [diff] [blame] | 1950 |  * @data:	first arg to filler(data, page) function, often left as NULL | 
| Nick Piggin | 6fe6900 | 2007-05-06 14:49:04 -0700 | [diff] [blame] | 1951 |  * | 
 | 1952 |  * Read into the page cache. If a page already exists, and PageUptodate() is | 
 | 1953 |  * not set, try to fill the page then wait for it to become unlocked. | 
 | 1954 |  * | 
 | 1955 |  * If the page does not get brought uptodate, return -EIO. | 
 | 1956 |  */ | 
 | 1957 | struct page *read_cache_page(struct address_space *mapping, | 
| Fengguang Wu | 57f6b96 | 2007-10-16 01:24:37 -0700 | [diff] [blame] | 1958 | 				pgoff_t index, | 
| Hugh Dickins | 5e5358e | 2011-07-25 17:12:23 -0700 | [diff] [blame] | 1959 | 				int (*filler)(void *, struct page *), | 
| Nick Piggin | 6fe6900 | 2007-05-06 14:49:04 -0700 | [diff] [blame] | 1960 | 				void *data) | 
 | 1961 | { | 
| Linus Torvalds | 0531b2a | 2010-01-27 09:20:03 -0800 | [diff] [blame] | 1962 | 	return wait_on_page_read(read_cache_page_async(mapping, index, filler, data)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1963 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1964 | EXPORT_SYMBOL(read_cache_page); | 
 | 1965 |  | 
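/*
 * Example usage (sketch): most in-tree callers reach read_cache_page()
 * through the read_mapping_page() wrapper in <linux/pagemap.h>, which
 * passes ->readpage as the filler.  myfs_read_page is hypothetical.
 */
#if 0	/* illustration only */
static struct page *myfs_read_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page = read_mapping_page(mapping, index, NULL);

	if (IS_ERR(page))
		return page;	/* ERR_PTR(-EIO), ERR_PTR(-ENOMEM), ... */
	/*
	 * The page is uptodate and unlocked here; kmap() it to look at the
	 * data, and drop the reference with page_cache_release() when done.
	 */
	return page;
}
#endif
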
 | 1966 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1967 |  * The logic we want is | 
 | 1968 |  * | 
 | 1969 |  *	if suid or (sgid and xgrp) | 
 | 1970 |  *		remove privs | 
 | 1971 |  */ | 
| Jens Axboe | 01de85e | 2006-10-17 19:50:36 +0200 | [diff] [blame] | 1972 | int should_remove_suid(struct dentry *dentry) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1973 | { | 
 | 1974 | 	mode_t mode = dentry->d_inode->i_mode; | 
 | 1975 | 	int kill = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1976 |  | 
 | 1977 | 	/* suid always must be killed */ | 
 | 1978 | 	if (unlikely(mode & S_ISUID)) | 
 | 1979 | 		kill = ATTR_KILL_SUID; | 
 | 1980 |  | 
 | 1981 | 	/* | 
 | 1982 | 	 * sgid without any exec bits is just a mandatory locking mark; leave | 
 | 1983 | 	 * it alone.  If some exec bits are set, it's a real sgid; kill it. | 
 | 1984 | 	 */ | 
 | 1985 | 	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP))) | 
 | 1986 | 		kill |= ATTR_KILL_SGID; | 
 | 1987 |  | 
| Dmitri Monakhov | 7f5ff76 | 2008-12-01 14:34:56 -0800 | [diff] [blame] | 1988 | 	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode))) | 
| Jens Axboe | 01de85e | 2006-10-17 19:50:36 +0200 | [diff] [blame] | 1989 | 		return kill; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1990 |  | 
| Jens Axboe | 01de85e | 2006-10-17 19:50:36 +0200 | [diff] [blame] | 1991 | 	return 0; | 
 | 1992 | } | 
| Mark Fasheh | d23a147 | 2006-10-17 17:05:18 -0700 | [diff] [blame] | 1993 | EXPORT_SYMBOL(should_remove_suid); | 
| Jens Axboe | 01de85e | 2006-10-17 19:50:36 +0200 | [diff] [blame] | 1994 |  | 
| Miklos Szeredi | 7f3d4ee | 2008-05-07 09:22:39 +0200 | [diff] [blame] | 1995 | static int __remove_suid(struct dentry *dentry, int kill) | 
| Jens Axboe | 01de85e | 2006-10-17 19:50:36 +0200 | [diff] [blame] | 1996 | { | 
 | 1997 | 	struct iattr newattrs; | 
 | 1998 |  | 
 | 1999 | 	newattrs.ia_valid = ATTR_FORCE | kill; | 
 | 2000 | 	return notify_change(dentry, &newattrs); | 
 | 2001 | } | 
 | 2002 |  | 
| Miklos Szeredi | 2f1936b | 2008-06-24 16:50:14 +0200 | [diff] [blame] | 2003 | int file_remove_suid(struct file *file) | 
| Jens Axboe | 01de85e | 2006-10-17 19:50:36 +0200 | [diff] [blame] | 2004 | { | 
| Miklos Szeredi | 2f1936b | 2008-06-24 16:50:14 +0200 | [diff] [blame] | 2005 | 	struct dentry *dentry = file->f_path.dentry; | 
| Andi Kleen | 69b4573 | 2011-05-28 08:25:51 -0700 | [diff] [blame] | 2006 | 	struct inode *inode = dentry->d_inode; | 
 | 2007 | 	int killsuid; | 
 | 2008 | 	int killpriv; | 
| Serge E. Hallyn | b537677 | 2007-10-16 23:31:36 -0700 | [diff] [blame] | 2009 | 	int error = 0; | 
| Jens Axboe | 01de85e | 2006-10-17 19:50:36 +0200 | [diff] [blame] | 2010 |  | 
| Andi Kleen | 69b4573 | 2011-05-28 08:25:51 -0700 | [diff] [blame] | 2011 | 	/* Fast path for nothing security related */ | 
 | 2012 | 	if (IS_NOSEC(inode)) | 
 | 2013 | 		return 0; | 
 | 2014 |  | 
 | 2015 | 	killsuid = should_remove_suid(dentry); | 
 | 2016 | 	killpriv = security_inode_need_killpriv(dentry); | 
 | 2017 |  | 
| Serge E. Hallyn | b537677 | 2007-10-16 23:31:36 -0700 | [diff] [blame] | 2018 | 	if (killpriv < 0) | 
 | 2019 | 		return killpriv; | 
 | 2020 | 	if (killpriv) | 
 | 2021 | 		error = security_inode_killpriv(dentry); | 
 | 2022 | 	if (!error && killsuid) | 
 | 2023 | 		error = __remove_suid(dentry, killsuid); | 
| Al Viro | 9e1f1de | 2011-06-03 18:24:58 -0400 | [diff] [blame] | 2024 | 	if (!error && (inode->i_sb->s_flags & MS_NOSEC)) | 
| Andi Kleen | 69b4573 | 2011-05-28 08:25:51 -0700 | [diff] [blame] | 2025 | 		inode->i_flags |= S_NOSEC; | 
| Jens Axboe | 01de85e | 2006-10-17 19:50:36 +0200 | [diff] [blame] | 2026 |  | 
| Serge E. Hallyn | b537677 | 2007-10-16 23:31:36 -0700 | [diff] [blame] | 2027 | 	return error; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2028 | } | 
| Miklos Szeredi | 2f1936b | 2008-06-24 16:50:14 +0200 | [diff] [blame] | 2029 | EXPORT_SYMBOL(file_remove_suid); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2030 |  | 
| Nick Piggin | 2f718ff | 2007-10-16 01:24:59 -0700 | [diff] [blame] | 2031 | static size_t __iovec_copy_from_user_inatomic(char *vaddr, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2032 | 			const struct iovec *iov, size_t base, size_t bytes) | 
 | 2033 | { | 
| Ingo Molnar | f180053 | 2009-03-02 11:00:57 +0100 | [diff] [blame] | 2034 | 	size_t copied = 0, left = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2035 |  | 
 | 2036 | 	while (bytes) { | 
 | 2037 | 		char __user *buf = iov->iov_base + base; | 
 | 2038 | 		int copy = min(bytes, iov->iov_len - base); | 
 | 2039 |  | 
 | 2040 | 		base = 0; | 
| Ingo Molnar | f180053 | 2009-03-02 11:00:57 +0100 | [diff] [blame] | 2041 | 		left = __copy_from_user_inatomic(vaddr, buf, copy); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2042 | 		copied += copy; | 
 | 2043 | 		bytes -= copy; | 
 | 2044 | 		vaddr += copy; | 
 | 2045 | 		iov++; | 
 | 2046 |  | 
| NeilBrown | 01408c4 | 2006-06-25 05:47:58 -0700 | [diff] [blame] | 2047 | 		if (unlikely(left)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2048 | 			break; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2049 | 	} | 
 | 2050 | 	return copied - left; | 
 | 2051 | } | 
 | 2052 |  | 
 | 2053 | /* | 
| Nick Piggin | 2f718ff | 2007-10-16 01:24:59 -0700 | [diff] [blame] | 2054 |  * Copy as much as we can into the page and return the number of bytes which | 
| André Goddard Rosa | af901ca | 2009-11-14 13:09:05 -0200 | [diff] [blame] | 2055 |  * were successfully copied.  If a fault is encountered part-way through, | 
| Nick Piggin | 2f718ff | 2007-10-16 01:24:59 -0700 | [diff] [blame] | 2056 |  * return the number of bytes which were copied before the fault. | 
 | 2057 |  */ | 
 | 2058 | size_t iov_iter_copy_from_user_atomic(struct page *page, | 
 | 2059 | 		struct iov_iter *i, unsigned long offset, size_t bytes) | 
 | 2060 | { | 
 | 2061 | 	char *kaddr; | 
 | 2062 | 	size_t copied; | 
 | 2063 |  | 
 | 2064 | 	BUG_ON(!in_atomic()); | 
 | 2065 | 	kaddr = kmap_atomic(page, KM_USER0); | 
 | 2066 | 	if (likely(i->nr_segs == 1)) { | 
 | 2067 | 		int left; | 
 | 2068 | 		char __user *buf = i->iov->iov_base + i->iov_offset; | 
| Ingo Molnar | f180053 | 2009-03-02 11:00:57 +0100 | [diff] [blame] | 2069 | 		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes); | 
| Nick Piggin | 2f718ff | 2007-10-16 01:24:59 -0700 | [diff] [blame] | 2070 | 		copied = bytes - left; | 
 | 2071 | 	} else { | 
 | 2072 | 		copied = __iovec_copy_from_user_inatomic(kaddr + offset, | 
 | 2073 | 						i->iov, i->iov_offset, bytes); | 
 | 2074 | 	} | 
 | 2075 | 	kunmap_atomic(kaddr, KM_USER0); | 
 | 2076 |  | 
 | 2077 | 	return copied; | 
 | 2078 | } | 
| Nick Piggin | 89e1078 | 2007-10-16 01:25:07 -0700 | [diff] [blame] | 2079 | EXPORT_SYMBOL(iov_iter_copy_from_user_atomic); | 
| Nick Piggin | 2f718ff | 2007-10-16 01:24:59 -0700 | [diff] [blame] | 2080 |  | 
 | 2081 | /* | 
 | 2082 |  * This has the same side effects and return value as | 
 | 2083 |  * iov_iter_copy_from_user_atomic(). | 
 | 2084 |  * The difference is that it attempts to resolve faults. | 
 | 2085 |  * Page must not be locked. | 
 | 2086 |  */ | 
 | 2087 | size_t iov_iter_copy_from_user(struct page *page, | 
 | 2088 | 		struct iov_iter *i, unsigned long offset, size_t bytes) | 
 | 2089 | { | 
 | 2090 | 	char *kaddr; | 
 | 2091 | 	size_t copied; | 
 | 2092 |  | 
 | 2093 | 	kaddr = kmap(page); | 
 | 2094 | 	if (likely(i->nr_segs == 1)) { | 
 | 2095 | 		int left; | 
 | 2096 | 		char __user *buf = i->iov->iov_base + i->iov_offset; | 
| Ingo Molnar | f180053 | 2009-03-02 11:00:57 +0100 | [diff] [blame] | 2097 | 		left = __copy_from_user(kaddr + offset, buf, bytes); | 
| Nick Piggin | 2f718ff | 2007-10-16 01:24:59 -0700 | [diff] [blame] | 2098 | 		copied = bytes - left; | 
 | 2099 | 	} else { | 
 | 2100 | 		copied = __iovec_copy_from_user_inatomic(kaddr + offset, | 
 | 2101 | 						i->iov, i->iov_offset, bytes); | 
 | 2102 | 	} | 
 | 2103 | 	kunmap(page); | 
 | 2104 | 	return copied; | 
 | 2105 | } | 
| Nick Piggin | 89e1078 | 2007-10-16 01:25:07 -0700 | [diff] [blame] | 2106 | EXPORT_SYMBOL(iov_iter_copy_from_user); | 
| Nick Piggin | 2f718ff | 2007-10-16 01:24:59 -0700 | [diff] [blame] | 2107 |  | 
| Nick Piggin | f700926 | 2008-03-10 11:43:59 -0700 | [diff] [blame] | 2108 | void iov_iter_advance(struct iov_iter *i, size_t bytes) | 
| Nick Piggin | 2f718ff | 2007-10-16 01:24:59 -0700 | [diff] [blame] | 2109 | { | 
| Nick Piggin | f700926 | 2008-03-10 11:43:59 -0700 | [diff] [blame] | 2110 | 	BUG_ON(i->count < bytes); | 
 | 2111 |  | 
| Nick Piggin | 2f718ff | 2007-10-16 01:24:59 -0700 | [diff] [blame] | 2112 | 	if (likely(i->nr_segs == 1)) { | 
 | 2113 | 		i->iov_offset += bytes; | 
| Nick Piggin | f700926 | 2008-03-10 11:43:59 -0700 | [diff] [blame] | 2114 | 		i->count -= bytes; | 
| Nick Piggin | 2f718ff | 2007-10-16 01:24:59 -0700 | [diff] [blame] | 2115 | 	} else { | 
 | 2116 | 		const struct iovec *iov = i->iov; | 
 | 2117 | 		size_t base = i->iov_offset; | 
| Jeff Layton | 39be79c | 2011-10-27 23:53:08 +0200 | [diff] [blame] | 2118 | 		unsigned long nr_segs = i->nr_segs; | 
| Nick Piggin | 2f718ff | 2007-10-16 01:24:59 -0700 | [diff] [blame] | 2119 |  | 
| Nick Piggin | 124d3b7 | 2008-02-02 15:01:17 +0100 | [diff] [blame] | 2120 | 		/* | 
 | 2121 | 		 * The !iov->iov_len check ensures we skip over unlikely | 
| Nick Piggin | f700926 | 2008-03-10 11:43:59 -0700 | [diff] [blame] | 2122 | 		 * zero-length segments (without overrunning the iovec). | 
| Nick Piggin | 124d3b7 | 2008-02-02 15:01:17 +0100 | [diff] [blame] | 2123 | 		 */ | 
| Linus Torvalds | 94ad374 | 2008-07-30 14:45:12 -0700 | [diff] [blame] | 2124 | 		while (bytes || unlikely(i->count && !iov->iov_len)) { | 
| Nick Piggin | f700926 | 2008-03-10 11:43:59 -0700 | [diff] [blame] | 2125 | 			int copy; | 
| Nick Piggin | 2f718ff | 2007-10-16 01:24:59 -0700 | [diff] [blame] | 2126 |  | 
| Nick Piggin | f700926 | 2008-03-10 11:43:59 -0700 | [diff] [blame] | 2127 | 			copy = min(bytes, iov->iov_len - base); | 
 | 2128 | 			BUG_ON(!i->count || i->count < copy); | 
 | 2129 | 			i->count -= copy; | 
| Nick Piggin | 2f718ff | 2007-10-16 01:24:59 -0700 | [diff] [blame] | 2130 | 			bytes -= copy; | 
 | 2131 | 			base += copy; | 
 | 2132 | 			if (iov->iov_len == base) { | 
 | 2133 | 				iov++; | 
| Jeff Layton | 39be79c | 2011-10-27 23:53:08 +0200 | [diff] [blame] | 2134 | 				nr_segs--; | 
| Nick Piggin | 2f718ff | 2007-10-16 01:24:59 -0700 | [diff] [blame] | 2135 | 				base = 0; | 
 | 2136 | 			} | 
 | 2137 | 		} | 
 | 2138 | 		i->iov = iov; | 
 | 2139 | 		i->iov_offset = base; | 
| Jeff Layton | 39be79c | 2011-10-27 23:53:08 +0200 | [diff] [blame] | 2140 | 		i->nr_segs = nr_segs; | 
| Nick Piggin | 2f718ff | 2007-10-16 01:24:59 -0700 | [diff] [blame] | 2141 | 	} | 
 | 2142 | } | 
| Nick Piggin | 89e1078 | 2007-10-16 01:25:07 -0700 | [diff] [blame] | 2143 | EXPORT_SYMBOL(iov_iter_advance); | 
| Nick Piggin | 2f718ff | 2007-10-16 01:24:59 -0700 | [diff] [blame] | 2144 |  | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2145 | /* | 
 | 2146 |  * Fault in the first iovec of the given iov_iter, to a maximum length | 
 | 2147 |  * of bytes. Returns 0 on success, or non-zero if the memory could not be | 
 | 2148 |  * accessed (i.e. because it is an invalid address). | 
 | 2149 |  * | 
 | 2150 |  * writev-intensive code may want this to prefault several iovecs -- that | 
 | 2151 |  * would be possible (callers must not rely on the fact that _only_ the | 
 | 2152 |  * first iovec will be faulted with the current implementation). | 
 | 2153 |  */ | 
 | 2154 | int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes) | 
| Nick Piggin | 2f718ff | 2007-10-16 01:24:59 -0700 | [diff] [blame] | 2155 | { | 
| Nick Piggin | 2f718ff | 2007-10-16 01:24:59 -0700 | [diff] [blame] | 2156 | 	char __user *buf = i->iov->iov_base + i->iov_offset; | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2157 | 	bytes = min(bytes, i->iov->iov_len - i->iov_offset); | 
 | 2158 | 	return fault_in_pages_readable(buf, bytes); | 
| Nick Piggin | 2f718ff | 2007-10-16 01:24:59 -0700 | [diff] [blame] | 2159 | } | 
| Nick Piggin | 89e1078 | 2007-10-16 01:25:07 -0700 | [diff] [blame] | 2160 | EXPORT_SYMBOL(iov_iter_fault_in_readable); | 
| Nick Piggin | 2f718ff | 2007-10-16 01:24:59 -0700 | [diff] [blame] | 2161 |  | 
 | 2162 | /* | 
 | 2163 |  * Return the count of just the current iov_iter segment. | 
 | 2164 |  */ | 
 | 2165 | size_t iov_iter_single_seg_count(struct iov_iter *i) | 
 | 2166 | { | 
 | 2167 | 	const struct iovec *iov = i->iov; | 
 | 2168 | 	if (i->nr_segs == 1) | 
 | 2169 | 		return i->count; | 
 | 2170 | 	else | 
 | 2171 | 		return min(i->count, iov->iov_len - i->iov_offset); | 
 | 2172 | } | 
| Nick Piggin | 89e1078 | 2007-10-16 01:25:07 -0700 | [diff] [blame] | 2173 | EXPORT_SYMBOL(iov_iter_single_seg_count); | 
| Nick Piggin | 2f718ff | 2007-10-16 01:24:59 -0700 | [diff] [blame] | 2174 |  | 
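/*
 * Example (sketch): the canonical pattern for draining an iov_iter in a
 * write path -- prefault, copy atomically, advance by what was actually
 * copied.  This is the same shape generic_perform_write() uses below;
 * 'page' and 'offset' stand in for the destination pagecache page.
 */
#if 0	/* illustration only */
	while (iov_iter_count(i)) {
		size_t bytes = min_t(size_t, iov_iter_count(i),
				     PAGE_CACHE_SIZE - offset);
		size_t copied;

		if (iov_iter_fault_in_readable(i, bytes))
			return -EFAULT;
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
		pagefault_enable();
		iov_iter_advance(i, copied);	/* may be short; loop retries */
	}
#endif
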
 | 2175 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2176 |  * Performs necessary checks before doing a write | 
 | 2177 |  * | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 2178 |  * May adjust the write position or the number of bytes to write. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2179 |  * Returns the error code that the caller should return, or | 
 | 2180 |  * zero if the write should be allowed. | 
 | 2181 |  */ | 
 | 2182 | inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk) | 
 | 2183 | { | 
 | 2184 | 	struct inode *inode = file->f_mapping->host; | 
| Jiri Slaby | 59e99e5 | 2010-03-05 13:41:44 -0800 | [diff] [blame] | 2185 | 	unsigned long limit = rlimit(RLIMIT_FSIZE); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2186 |  | 
 | 2187 | 	if (unlikely(*pos < 0)) | 
 | 2188 | 		return -EINVAL; | 
 | 2189 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2190 | 	if (!isblk) { | 
 | 2191 | 		/* FIXME: this is for backwards compatibility with 2.4 */ | 
 | 2192 | 		if (file->f_flags & O_APPEND) | 
 | 2193 | 			*pos = i_size_read(inode); | 
 | 2194 |  | 
 | 2195 | 		if (limit != RLIM_INFINITY) { | 
 | 2196 | 			if (*pos >= limit) { | 
 | 2197 | 				send_sig(SIGXFSZ, current, 0); | 
 | 2198 | 				return -EFBIG; | 
 | 2199 | 			} | 
 | 2200 | 			if (*count > limit - (typeof(limit))*pos) { | 
 | 2201 | 				*count = limit - (typeof(limit))*pos; | 
 | 2202 | 			} | 
 | 2203 | 		} | 
 | 2204 | 	} | 
 | 2205 |  | 
 | 2206 | 	/* | 
 | 2207 | 	 * LFS rule | 
 | 2208 | 	 */ | 
 | 2209 | 	if (unlikely(*pos + *count > MAX_NON_LFS && | 
 | 2210 | 				!(file->f_flags & O_LARGEFILE))) { | 
 | 2211 | 		if (*pos >= MAX_NON_LFS) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2212 | 			return -EFBIG; | 
 | 2213 | 		} | 
 | 2214 | 		if (*count > MAX_NON_LFS - (unsigned long)*pos) { | 
 | 2215 | 			*count = MAX_NON_LFS - (unsigned long)*pos; | 
 | 2216 | 		} | 
 | 2217 | 	} | 
 | 2218 |  | 
 | 2219 | 	/* | 
 | 2220 | 	 * Are we about to exceed the fs block limit? | 
 | 2221 | 	 * | 
 | 2222 | 	 * If we have written data it becomes a short write.  If we have | 
 | 2223 | 	 * exceeded without writing data we send a signal and return EFBIG. | 
 | 2224 | 	 * Linus's frestrict idea will clean these up nicely. | 
 | 2225 | 	 */ | 
 | 2226 | 	if (likely(!isblk)) { | 
 | 2227 | 		if (unlikely(*pos >= inode->i_sb->s_maxbytes)) { | 
 | 2228 | 			if (*count || *pos > inode->i_sb->s_maxbytes) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2229 | 				return -EFBIG; | 
 | 2230 | 			} | 
 | 2231 | 			/* zero-length writes at ->s_maxbytes are OK */ | 
 | 2232 | 		} | 
 | 2233 |  | 
 | 2234 | 		if (unlikely(*pos + *count > inode->i_sb->s_maxbytes)) | 
 | 2235 | 			*count = inode->i_sb->s_maxbytes - *pos; | 
 | 2236 | 	} else { | 
| David Howells | 9361401 | 2006-09-30 20:45:40 +0200 | [diff] [blame] | 2237 | #ifdef CONFIG_BLOCK | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2238 | 		loff_t isize; | 
 | 2239 | 		if (bdev_read_only(I_BDEV(inode))) | 
 | 2240 | 			return -EPERM; | 
 | 2241 | 		isize = i_size_read(inode); | 
 | 2242 | 		if (*pos >= isize) { | 
 | 2243 | 			if (*count || *pos > isize) | 
 | 2244 | 				return -ENOSPC; | 
 | 2245 | 		} | 
 | 2246 |  | 
 | 2247 | 		if (*pos + *count > isize) | 
 | 2248 | 			*count = isize - *pos; | 
| David Howells | 9361401 | 2006-09-30 20:45:40 +0200 | [diff] [blame] | 2249 | #else | 
 | 2250 | 		return -EPERM; | 
 | 2251 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2252 | 	} | 
 | 2253 | 	return 0; | 
 | 2254 | } | 
 | 2255 | EXPORT_SYMBOL(generic_write_checks); | 
 | 2256 |  | 
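/*
 * Example (sketch): the typical call sequence from a ->aio_write path,
 * exactly as __generic_file_aio_write() does further down in this file.
 */
#if 0	/* illustration only */
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		return err;
	/* pos and count may have been clamped; count can now legally be 0 */
#endif
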
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2257 | int pagecache_write_begin(struct file *file, struct address_space *mapping, | 
 | 2258 | 				loff_t pos, unsigned len, unsigned flags, | 
 | 2259 | 				struct page **pagep, void **fsdata) | 
 | 2260 | { | 
 | 2261 | 	const struct address_space_operations *aops = mapping->a_ops; | 
 | 2262 |  | 
| Nick Piggin | 4e02ed4 | 2008-10-29 14:00:55 -0700 | [diff] [blame] | 2263 | 	return aops->write_begin(file, mapping, pos, len, flags, | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2264 | 							pagep, fsdata); | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2265 | } | 
 | 2266 | EXPORT_SYMBOL(pagecache_write_begin); | 
 | 2267 |  | 
 | 2268 | int pagecache_write_end(struct file *file, struct address_space *mapping, | 
 | 2269 | 				loff_t pos, unsigned len, unsigned copied, | 
 | 2270 | 				struct page *page, void *fsdata) | 
 | 2271 | { | 
 | 2272 | 	const struct address_space_operations *aops = mapping->a_ops; | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2273 |  | 
| Nick Piggin | 4e02ed4 | 2008-10-29 14:00:55 -0700 | [diff] [blame] | 2274 | 	mark_page_accessed(page); | 
 | 2275 | 	return aops->write_end(file, mapping, pos, len, copied, page, fsdata); | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2276 | } | 
 | 2277 | EXPORT_SYMBOL(pagecache_write_end); | 
 | 2278 |  | 
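/*
 * Example (sketch): the write_begin/write_end pairing for copying a small
 * buffer of kernel data into the page cache.  Error handling and short
 * copies are trimmed; 'buf', 'pos' and 'len' are assumed, with the copy
 * staying inside one page.
 */
#if 0	/* illustration only */
	status = pagecache_write_begin(file, mapping, pos, len, 0,
					&page, &fsdata);
	if (status)
		return status;
	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr + offset_in_page(pos), buf, len);
	kunmap_atomic(kaddr, KM_USER0);
	status = pagecache_write_end(file, mapping, pos, len, len,
					page, fsdata);
#endif
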
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2279 | ssize_t | 
 | 2280 | generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov, | 
 | 2281 | 		unsigned long *nr_segs, loff_t pos, loff_t *ppos, | 
 | 2282 | 		size_t count, size_t ocount) | 
 | 2283 | { | 
 | 2284 | 	struct file	*file = iocb->ki_filp; | 
 | 2285 | 	struct address_space *mapping = file->f_mapping; | 
 | 2286 | 	struct inode	*inode = mapping->host; | 
 | 2287 | 	ssize_t		written; | 
| Christoph Hellwig | a969e90 | 2008-07-23 21:27:04 -0700 | [diff] [blame] | 2288 | 	size_t		write_len; | 
 | 2289 | 	pgoff_t		end; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2290 |  | 
 | 2291 | 	if (count != ocount) | 
 | 2292 | 		*nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count); | 
 | 2293 |  | 
| Christoph Hellwig | a969e90 | 2008-07-23 21:27:04 -0700 | [diff] [blame] | 2294 | 	write_len = iov_length(iov, *nr_segs); | 
 | 2295 | 	end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT; | 
| Christoph Hellwig | a969e90 | 2008-07-23 21:27:04 -0700 | [diff] [blame] | 2296 |  | 
| Nick Piggin | 48b47c5 | 2009-01-06 14:40:22 -0800 | [diff] [blame] | 2297 | 	written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1); | 
| Christoph Hellwig | a969e90 | 2008-07-23 21:27:04 -0700 | [diff] [blame] | 2298 | 	if (written) | 
 | 2299 | 		goto out; | 
 | 2300 |  | 
 | 2301 | 	/* | 
 | 2302 | 	 * After a write we want buffered reads to be sure to go to disk to get | 
 | 2303 | 	 * the new data.  We invalidate clean cached pages from the region we're | 
 | 2304 | 	 * about to write.  We do this *before* the write so that we can return | 
| Hisashi Hifumi | 6ccfa80 | 2008-09-02 14:35:40 -0700 | [diff] [blame] | 2305 | 	 * without clobbering -EIOCBQUEUED from ->direct_IO(). | 
| Christoph Hellwig | a969e90 | 2008-07-23 21:27:04 -0700 | [diff] [blame] | 2306 | 	 */ | 
 | 2307 | 	if (mapping->nrpages) { | 
 | 2308 | 		written = invalidate_inode_pages2_range(mapping, | 
 | 2309 | 					pos >> PAGE_CACHE_SHIFT, end); | 
| Hisashi Hifumi | 6ccfa80 | 2008-09-02 14:35:40 -0700 | [diff] [blame] | 2310 | 		/* | 
 | 2311 | 		 * If a page cannot be invalidated, return 0 to fall back | 
 | 2312 | 		 * to buffered write. | 
 | 2313 | 		 */ | 
 | 2314 | 		if (written) { | 
 | 2315 | 			if (written == -EBUSY) | 
 | 2316 | 				return 0; | 
| Christoph Hellwig | a969e90 | 2008-07-23 21:27:04 -0700 | [diff] [blame] | 2317 | 			goto out; | 
| Hisashi Hifumi | 6ccfa80 | 2008-09-02 14:35:40 -0700 | [diff] [blame] | 2318 | 		} | 
| Christoph Hellwig | a969e90 | 2008-07-23 21:27:04 -0700 | [diff] [blame] | 2319 | 	} | 
 | 2320 |  | 
 | 2321 | 	written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs); | 
 | 2322 |  | 
 | 2323 | 	/* | 
 | 2324 | 	 * Finally, try again to invalidate clean pages which might have been | 
 | 2325 | 	 * cached by non-direct readahead, or faulted in by get_user_pages() | 
 | 2326 | 	 * if the source of the write was an mmap'ed region of the file | 
 | 2327 | 	 * we're writing.  Either one is a pretty crazy thing to do, | 
 | 2328 | 	 * so we don't support it 100%.  If this invalidation | 
 | 2329 | 	 * fails, tough, the write still worked... | 
 | 2330 | 	 */ | 
 | 2331 | 	if (mapping->nrpages) { | 
 | 2332 | 		invalidate_inode_pages2_range(mapping, | 
 | 2333 | 					      pos >> PAGE_CACHE_SHIFT, end); | 
 | 2334 | 	} | 
 | 2335 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2336 | 	if (written > 0) { | 
| Namhyung Kim | 0116651 | 2010-10-26 14:21:58 -0700 | [diff] [blame] | 2337 | 		pos += written; | 
 | 2338 | 		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { | 
 | 2339 | 			i_size_write(inode, pos); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2340 | 			mark_inode_dirty(inode); | 
 | 2341 | 		} | 
| Namhyung Kim | 0116651 | 2010-10-26 14:21:58 -0700 | [diff] [blame] | 2342 | 		*ppos = pos; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2343 | 	} | 
| Christoph Hellwig | a969e90 | 2008-07-23 21:27:04 -0700 | [diff] [blame] | 2344 | out: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2345 | 	return written; | 
 | 2346 | } | 
 | 2347 | EXPORT_SYMBOL(generic_file_direct_write); | 
 | 2348 |  | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 2349 | /* | 
 | 2350 |  * Find or create a page at the given pagecache position. Return the locked | 
 | 2351 |  * page. This function is specifically for buffered writes. | 
 | 2352 |  */ | 
| Nick Piggin | 54566b2 | 2009-01-04 12:00:53 -0800 | [diff] [blame] | 2353 | struct page *grab_cache_page_write_begin(struct address_space *mapping, | 
 | 2354 | 					pgoff_t index, unsigned flags) | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 2355 | { | 
 | 2356 | 	int status; | 
 | 2357 | 	struct page *page; | 
| Nick Piggin | 54566b2 | 2009-01-04 12:00:53 -0800 | [diff] [blame] | 2358 | 	gfp_t gfp_notmask = 0; | 
 | 2359 | 	if (flags & AOP_FLAG_NOFS) | 
 | 2360 | 		gfp_notmask = __GFP_FS; | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 2361 | repeat: | 
 | 2362 | 	page = find_lock_page(mapping, index); | 
| Steven Rostedt | c585a26 | 2011-01-13 15:46:18 -0800 | [diff] [blame] | 2363 | 	if (page) | 
| Darrick J. Wong | 3d08bcc | 2011-05-27 12:23:34 -0700 | [diff] [blame] | 2364 | 		goto found; | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 2365 |  | 
| Nick Piggin | 54566b2 | 2009-01-04 12:00:53 -0800 | [diff] [blame] | 2366 | 	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask); | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 2367 | 	if (!page) | 
 | 2368 | 		return NULL; | 
| Nick Piggin | 54566b2 | 2009-01-04 12:00:53 -0800 | [diff] [blame] | 2369 | 	status = add_to_page_cache_lru(page, mapping, index, | 
 | 2370 | 						GFP_KERNEL & ~gfp_notmask); | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 2371 | 	if (unlikely(status)) { | 
 | 2372 | 		page_cache_release(page); | 
 | 2373 | 		if (status == -EEXIST) | 
 | 2374 | 			goto repeat; | 
 | 2375 | 		return NULL; | 
 | 2376 | 	} | 
| Darrick J. Wong | 3d08bcc | 2011-05-27 12:23:34 -0700 | [diff] [blame] | 2377 | found: | 
 | 2378 | 	wait_on_page_writeback(page); | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 2379 | 	return page; | 
 | 2380 | } | 
| Nick Piggin | 54566b2 | 2009-01-04 12:00:53 -0800 | [diff] [blame] | 2381 | EXPORT_SYMBOL(grab_cache_page_write_begin); | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 2382 |  | 
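/*
 * Example (sketch): the usual shape of a ->write_begin implementation
 * built on this helper -- compare block_write_begin() in fs/buffer.c,
 * which follows exactly this pattern.
 */
#if 0	/* illustration only */
	page = grab_cache_page_write_begin(mapping, pos >> PAGE_CACHE_SHIFT,
					   flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;
	/* ... map/allocate blocks for the range, unlock+release on error ... */
#endif
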
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2383 | static ssize_t generic_perform_write(struct file *file, | 
 | 2384 | 				struct iov_iter *i, loff_t pos) | 
 | 2385 | { | 
 | 2386 | 	struct address_space *mapping = file->f_mapping; | 
 | 2387 | 	const struct address_space_operations *a_ops = mapping->a_ops; | 
 | 2388 | 	long status = 0; | 
 | 2389 | 	ssize_t written = 0; | 
| Nick Piggin | 674b892 | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 2390 | 	unsigned int flags = 0; | 
 | 2391 |  | 
 | 2392 | 	/* | 
 | 2393 | 	 * Copies from kernel address space cannot fail (NFSD is a big user). | 
 | 2394 | 	 */ | 
 | 2395 | 	if (segment_eq(get_fs(), KERNEL_DS)) | 
 | 2396 | 		flags |= AOP_FLAG_UNINTERRUPTIBLE; | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2397 |  | 
 | 2398 | 	do { | 
 | 2399 | 		struct page *page; | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2400 | 		unsigned long offset;	/* Offset into pagecache page */ | 
 | 2401 | 		unsigned long bytes;	/* Bytes to write to page */ | 
 | 2402 | 		size_t copied;		/* Bytes copied from user */ | 
 | 2403 | 		void *fsdata; | 
 | 2404 |  | 
 | 2405 | 		offset = (pos & (PAGE_CACHE_SIZE - 1)); | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2406 | 		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, | 
 | 2407 | 						iov_iter_count(i)); | 
 | 2408 |  | 
 | 2409 | again: | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2410 | 		/* | 
 | 2411 | 		 * Bring in the user page that we will copy from _first_. | 
 | 2412 | 		 * Otherwise there's a nasty deadlock on copying from the | 
 | 2413 | 		 * same page as we're writing to, without it being marked | 
 | 2414 | 		 * up-to-date. | 
 | 2415 | 		 * | 
 | 2416 | 		 * Not only is this an optimisation, but it is also required | 
 | 2417 | 		 * to check that the address is actually valid, when atomic | 
 | 2418 | 		 * usercopies are used, below. | 
 | 2419 | 		 */ | 
 | 2420 | 		if (unlikely(iov_iter_fault_in_readable(i, bytes))) { | 
 | 2421 | 			status = -EFAULT; | 
 | 2422 | 			break; | 
 | 2423 | 		} | 
 | 2424 |  | 
| Nick Piggin | 674b892 | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 2425 | 		status = a_ops->write_begin(file, mapping, pos, bytes, flags, | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2426 | 						&page, &fsdata); | 
 | 2427 | 		if (unlikely(status)) | 
 | 2428 | 			break; | 
 | 2429 |  | 
| anfei zhou | 931e80e | 2010-02-02 13:44:02 -0800 | [diff] [blame] | 2430 | 		if (mapping_writably_mapped(mapping)) | 
 | 2431 | 			flush_dcache_page(page); | 
 | 2432 |  | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2433 | 		pagefault_disable(); | 
 | 2434 | 		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); | 
 | 2435 | 		pagefault_enable(); | 
 | 2436 | 		flush_dcache_page(page); | 
 | 2437 |  | 
| Josef Bacik | c8236db | 2009-07-05 12:08:18 -0700 | [diff] [blame] | 2438 | 		mark_page_accessed(page); | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2439 | 		status = a_ops->write_end(file, mapping, pos, bytes, copied, | 
 | 2440 | 						page, fsdata); | 
 | 2441 | 		if (unlikely(status < 0)) | 
 | 2442 | 			break; | 
 | 2443 | 		copied = status; | 
 | 2444 |  | 
 | 2445 | 		cond_resched(); | 
 | 2446 |  | 
| Nick Piggin | 124d3b7 | 2008-02-02 15:01:17 +0100 | [diff] [blame] | 2447 | 		iov_iter_advance(i, copied); | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2448 | 		if (unlikely(copied == 0)) { | 
 | 2449 | 			/* | 
 | 2450 | 			 * If we were unable to copy any data at all, we must | 
 | 2451 | 			 * fall back to a single segment length write. | 
 | 2452 | 			 * | 
 | 2453 | 			 * If we didn't fall back here, we could livelock | 
 | 2454 | 			 * because not all segments in the iov can be copied at | 
 | 2455 | 			 * once without a pagefault. | 
 | 2456 | 			 */ | 
 | 2457 | 			bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, | 
 | 2458 | 						iov_iter_single_seg_count(i)); | 
 | 2459 | 			goto again; | 
 | 2460 | 		} | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2461 | 		pos += copied; | 
 | 2462 | 		written += copied; | 
 | 2463 |  | 
 | 2464 | 		balance_dirty_pages_ratelimited(mapping); | 
| Jan Kara | a50527b | 2011-12-02 09:17:02 +0800 | [diff] [blame] | 2465 | 		if (fatal_signal_pending(current)) { | 
 | 2466 | 			status = -EINTR; | 
 | 2467 | 			break; | 
 | 2468 | 		} | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2469 | 	} while (iov_iter_count(i)); | 
 | 2470 |  | 
 | 2471 | 	return written ? written : status; | 
 | 2472 | } | 
 | 2473 |  | 
 | 2474 | ssize_t | 
 | 2475 | generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov, | 
 | 2476 | 		unsigned long nr_segs, loff_t pos, loff_t *ppos, | 
 | 2477 | 		size_t count, ssize_t written) | 
 | 2478 | { | 
 | 2479 | 	struct file *file = iocb->ki_filp; | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2480 | 	ssize_t status; | 
 | 2481 | 	struct iov_iter i; | 
 | 2482 |  | 
 | 2483 | 	iov_iter_init(&i, iov, nr_segs, count, written); | 
| Nick Piggin | 4e02ed4 | 2008-10-29 14:00:55 -0700 | [diff] [blame] | 2484 | 	status = generic_perform_write(file, &i, pos); | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2485 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2486 | 	if (likely(status >= 0)) { | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2487 | 		written += status; | 
 | 2488 | 		*ppos = pos + status; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2489 | 	} | 
 | 2490 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2491 | 	return written ? written : status; | 
 | 2492 | } | 
 | 2493 | EXPORT_SYMBOL(generic_file_buffered_write); | 
 | 2494 |  | 
| Jan Kara | e4dd9de | 2009-08-17 18:10:06 +0200 | [diff] [blame] | 2495 | /** | 
 | 2496 |  * __generic_file_aio_write - write data to a file | 
 | 2497 |  * @iocb:	IO state structure (file, offset, etc.) | 
 | 2498 |  * @iov:	vector with data to write | 
 | 2499 |  * @nr_segs:	number of segments in the vector | 
 | 2500 |  * @ppos:	position where to write | 
 | 2501 |  * | 
 | 2502 |  * This function does all the work needed for actually writing data to a | 
 | 2503 |  * file. It does all basic checks, removes SUID from the file, updates | 
 | 2504 |  * modification times and calls proper subroutines depending on whether we | 
 | 2505 |  * do direct IO or a standard buffered write. | 
 | 2506 |  * | 
 | 2507 |  * It expects i_mutex to be grabbed unless we work on a block device or similar | 
 | 2508 |  * object which does not need locking at all. | 
 | 2509 |  * | 
 | 2510 |  * This function does *not* take care of syncing data in case of O_SYNC write. | 
 | 2511 |  * A caller has to handle it. This is mainly due to the fact that we want to | 
 | 2512 |  * avoid syncing under i_mutex. | 
 | 2513 |  */ | 
 | 2514 | ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov, | 
 | 2515 | 				 unsigned long nr_segs, loff_t *ppos) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2516 | { | 
 | 2517 | 	struct file *file = iocb->ki_filp; | 
| Jeff Moyer | fb5527e | 2006-10-19 23:28:13 -0700 | [diff] [blame] | 2518 | 	struct address_space * mapping = file->f_mapping; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2519 | 	size_t ocount;		/* original count */ | 
 | 2520 | 	size_t count;		/* after file limit checks */ | 
 | 2521 | 	struct inode 	*inode = mapping->host; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2522 | 	loff_t		pos; | 
 | 2523 | 	ssize_t		written; | 
 | 2524 | 	ssize_t		err; | 
 | 2525 |  | 
 | 2526 | 	ocount = 0; | 
| Dmitriy Monakhov | 0ceb331 | 2007-05-08 00:23:02 -0700 | [diff] [blame] | 2527 | 	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ); | 
 | 2528 | 	if (err) | 
 | 2529 | 		return err; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2530 |  | 
 | 2531 | 	count = ocount; | 
 | 2532 | 	pos = *ppos; | 
 | 2533 |  | 
 | 2534 | 	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); | 
 | 2535 |  | 
 | 2536 | 	/* We can write back this queue in page reclaim */ | 
 | 2537 | 	current->backing_dev_info = mapping->backing_dev_info; | 
 | 2538 | 	written = 0; | 
 | 2539 |  | 
 | 2540 | 	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); | 
 | 2541 | 	if (err) | 
 | 2542 | 		goto out; | 
 | 2543 |  | 
 | 2544 | 	if (count == 0) | 
 | 2545 | 		goto out; | 
 | 2546 |  | 
| Miklos Szeredi | 2f1936b | 2008-06-24 16:50:14 +0200 | [diff] [blame] | 2547 | 	err = file_remove_suid(file); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2548 | 	if (err) | 
 | 2549 | 		goto out; | 
 | 2550 |  | 
| Christoph Hellwig | 870f481 | 2006-01-09 20:52:01 -0800 | [diff] [blame] | 2551 | 	file_update_time(file); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2552 |  | 
 | 2553 | 	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */ | 
 | 2554 | 	if (unlikely(file->f_flags & O_DIRECT)) { | 
| Jeff Moyer | fb5527e | 2006-10-19 23:28:13 -0700 | [diff] [blame] | 2555 | 		loff_t endbyte; | 
 | 2556 | 		ssize_t written_buffered; | 
 | 2557 |  | 
 | 2558 | 		written = generic_file_direct_write(iocb, iov, &nr_segs, pos, | 
 | 2559 | 							ppos, count, ocount); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2560 | 		if (written < 0 || written == count) | 
 | 2561 | 			goto out; | 
 | 2562 | 		/* | 
 | 2563 | 		 * direct-io write to a hole: fall through to buffered I/O | 
 | 2564 | 		 * for completing the rest of the request. | 
 | 2565 | 		 */ | 
 | 2566 | 		pos += written; | 
 | 2567 | 		count -= written; | 
| Jeff Moyer | fb5527e | 2006-10-19 23:28:13 -0700 | [diff] [blame] | 2568 | 		written_buffered = generic_file_buffered_write(iocb, iov, | 
 | 2569 | 						nr_segs, pos, ppos, count, | 
 | 2570 | 						written); | 
 | 2571 | 		/* | 
 | 2572 | 		 * If generic_file_buffered_write() returned a synchronous error | 
 | 2573 | 		 * then we want to return the number of bytes which were | 
 | 2574 | 		 * direct-written, or the error code if that was zero.  Note | 
 | 2575 | 		 * that this differs from normal direct-io semantics, which | 
 | 2576 | 		 * will return -EFOO even if some bytes were written. | 
 | 2577 | 		 */ | 
 | 2578 | 		if (written_buffered < 0) { | 
 | 2579 | 			err = written_buffered; | 
 | 2580 | 			goto out; | 
 | 2581 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2582 |  | 
| Jeff Moyer | fb5527e | 2006-10-19 23:28:13 -0700 | [diff] [blame] | 2583 | 		/* | 
 | 2584 | 		 * We need to ensure that the page cache pages are written to | 
 | 2585 | 		 * disk and invalidated to preserve the expected O_DIRECT | 
 | 2586 | 		 * semantics. | 
 | 2587 | 		 */ | 
 | 2588 | 		endbyte = pos + written_buffered - written - 1; | 
| Christoph Hellwig | c05c4ed | 2009-09-23 15:07:30 +0200 | [diff] [blame] | 2589 | 		err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte); | 
| Jeff Moyer | fb5527e | 2006-10-19 23:28:13 -0700 | [diff] [blame] | 2590 | 		if (err == 0) { | 
 | 2591 | 			written = written_buffered; | 
 | 2592 | 			invalidate_mapping_pages(mapping, | 
 | 2593 | 						 pos >> PAGE_CACHE_SHIFT, | 
 | 2594 | 						 endbyte >> PAGE_CACHE_SHIFT); | 
 | 2595 | 		} else { | 
 | 2596 | 			/* | 
 | 2597 | 			 * We don't know how much we wrote, so just return | 
 | 2598 | 			 * the number of bytes which were direct-written | 
 | 2599 | 			 */ | 
 | 2600 | 		} | 
 | 2601 | 	} else { | 
 | 2602 | 		written = generic_file_buffered_write(iocb, iov, nr_segs, | 
 | 2603 | 				pos, ppos, count, written); | 
 | 2604 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2605 | out: | 
 | 2606 | 	current->backing_dev_info = NULL; | 
 | 2607 | 	return written ? written : err; | 
 | 2608 | } | 
| Jan Kara | e4dd9de | 2009-08-17 18:10:06 +0200 | [diff] [blame] | 2609 | EXPORT_SYMBOL(__generic_file_aio_write); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2610 |  | 
| Jan Kara | e4dd9de | 2009-08-17 18:10:06 +0200 | [diff] [blame] | 2611 | /** | 
 | 2612 |  * generic_file_aio_write - write data to a file | 
 | 2613 |  * @iocb:	IO state structure | 
 | 2614 |  * @iov:	vector with data to write | 
 | 2615 |  * @nr_segs:	number of segments in the vector | 
 | 2616 |  * @pos:	position in file where to write | 
 | 2617 |  * | 
 | 2618 |  * This is a wrapper around __generic_file_aio_write() to be used by most | 
 | 2619 |  * filesystems. It takes care of syncing the file in case of O_SYNC file | 
 | 2620 |  * and acquires i_mutex as needed. | 
 | 2621 |  */ | 
| Badari Pulavarty | 027445c | 2006-09-30 23:28:46 -0700 | [diff] [blame] | 2622 | ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov, | 
 | 2623 | 		unsigned long nr_segs, loff_t pos) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2624 | { | 
 | 2625 | 	struct file *file = iocb->ki_filp; | 
| Jan Kara | 148f948 | 2009-08-17 19:52:36 +0200 | [diff] [blame] | 2626 | 	struct inode *inode = file->f_mapping->host; | 
| Jens Axboe | 55602dd | 2010-06-24 15:05:37 +0200 | [diff] [blame] | 2627 | 	struct blk_plug plug; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2628 | 	ssize_t ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2629 |  | 
 | 2630 | 	BUG_ON(iocb->ki_pos != pos); | 
 | 2631 |  | 
| Jes Sorensen | 1b1dcc1 | 2006-01-09 15:59:24 -0800 | [diff] [blame] | 2632 | 	mutex_lock(&inode->i_mutex); | 
| Jens Axboe | 55602dd | 2010-06-24 15:05:37 +0200 | [diff] [blame] | 2633 | 	blk_start_plug(&plug); | 
| Jan Kara | e4dd9de | 2009-08-17 18:10:06 +0200 | [diff] [blame] | 2634 | 	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos); | 
| Jes Sorensen | 1b1dcc1 | 2006-01-09 15:59:24 -0800 | [diff] [blame] | 2635 | 	mutex_unlock(&inode->i_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2636 |  | 
| Jan Kara | 148f948 | 2009-08-17 19:52:36 +0200 | [diff] [blame] | 2637 | 	if (ret > 0 || ret == -EIOCBQUEUED) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2638 | 		ssize_t err; | 
 | 2639 |  | 
| Jan Kara | 148f948 | 2009-08-17 19:52:36 +0200 | [diff] [blame] | 2640 | 		err = generic_write_sync(file, pos, ret); | 
| Jan Kara | c7b50db | 2009-08-18 16:18:20 +0200 | [diff] [blame] | 2641 | 		if (err < 0 && ret > 0) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2642 | 			ret = err; | 
 | 2643 | 	} | 
| Jens Axboe | 55602dd | 2010-06-24 15:05:37 +0200 | [diff] [blame] | 2644 | 	blk_finish_plug(&plug); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2645 | 	return ret; | 
 | 2646 | } | 
 | 2647 | EXPORT_SYMBOL(generic_file_aio_write); | 
 | 2648 |  | 
| David Howells | cf9a2ae | 2006-08-29 19:05:54 +0100 | [diff] [blame] | 2649 | /** | 
 | 2650 |  * try_to_release_page() - release old fs-specific metadata on a page | 
 | 2651 |  * | 
 | 2652 |  * @page: the page which the kernel is trying to free | 
 | 2653 |  * @gfp_mask: memory allocation flags (and I/O mode) | 
 | 2654 |  * | 
 | 2655 |  * The address_space is to try to release any data against the page | 
 | 2656 |  * (presumably at page->private).  If the release was successful, return 1. | 
 | 2657 |  * Otherwise return zero. | 
 | 2658 |  * | 
| David Howells | 266cf65 | 2009-04-03 16:42:36 +0100 | [diff] [blame] | 2659 |  * This may also be called if PG_fscache is set on a page, indicating that the | 
 | 2660 |  * page is known to the local caching routines. | 
 | 2661 |  * | 
| David Howells | cf9a2ae | 2006-08-29 19:05:54 +0100 | [diff] [blame] | 2662 |  * The @gfp_mask argument specifies whether I/O may be performed to release | 
| Mingming Cao | 3f31fdd | 2008-07-25 01:46:22 -0700 | [diff] [blame] | 2663 |  * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS). | 
| David Howells | cf9a2ae | 2006-08-29 19:05:54 +0100 | [diff] [blame] | 2664 |  * | 
| David Howells | cf9a2ae | 2006-08-29 19:05:54 +0100 | [diff] [blame] | 2665 |  */ | 
 | 2666 | int try_to_release_page(struct page *page, gfp_t gfp_mask) | 
 | 2667 | { | 
 | 2668 | 	struct address_space * const mapping = page->mapping; | 
 | 2669 |  | 
 | 2670 | 	BUG_ON(!PageLocked(page)); | 
 | 2671 | 	if (PageWriteback(page)) | 
 | 2672 | 		return 0; | 
 | 2673 |  | 
 | 2674 | 	if (mapping && mapping->a_ops->releasepage) | 
 | 2675 | 		return mapping->a_ops->releasepage(page, gfp_mask); | 
 | 2676 | 	return try_to_free_buffers(page); | 
 | 2677 | } | 
 | 2678 |  | 
 | 2679 | EXPORT_SYMBOL(try_to_release_page); |
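
/*
 * Example (sketch): a minimal ->releasepage for a filesystem whose pages
 * carry no private state beyond buffer heads.  Real implementations (e.g.
 * journalling filesystems) usually have extra bookkeeping to check first.
 * The myfs_* name is hypothetical.
 */
#if 0	/* illustration only */
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	if (!page_has_buffers(page))
		return 1;		/* nothing attached, safe to free */
	return try_to_free_buffers(page);
}
#endif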