/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *      (sct@redhat.com), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01
static inline int ext4_begin_ordered_truncate(struct inode *inode,
                                              loff_t new_size)
{
        return jbd2_journal_begin_ordered_truncate(
                                EXT4_SB(inode->i_sb)->s_journal,
                                &EXT4_I(inode)->jinode,
                                new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
        int ea_blocks = EXT4_I(inode)->i_file_acl ?
                (inode->i_sb->s_blocksize >> 9) : 0;

        return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
        ext4_lblk_t needed;

        needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

        /* Give ourselves just enough room to cope with inodes in which
         * i_blocks is corrupt: we've seen disk corruptions in the past
         * which resulted in random data in an inode which looked enough
         * like a regular file for ext4 to try to delete it.  Things
         * will go a bit crazy if that happens, but at least we should
         * try not to panic the whole kernel. */
        if (needed < 2)
                needed = 2;

        /* But we need to bound the transaction so we don't overflow the
         * journal. */
        if (needed > EXT4_MAX_TRANS_DATA)
                needed = EXT4_MAX_TRANS_DATA;

        return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}
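
/*
 * Worked example (illustrative, not part of the original source): with a
 * 4 KiB block size (s_blocksize_bits == 12) and a file whose i_blocks is
 * 2048 512-byte sectors, needed = 2048 >> 3 = 256; that value is then
 * capped at EXT4_MAX_TRANS_DATA if it exceeds that limit, before
 * EXT4_DATA_TRANS_BLOCKS() is added on top.
 */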

/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
        handle_t *result;

        result = ext4_journal_start(inode, blocks_for_truncate(inode));
        if (!IS_ERR(result))
                return result;

        ext4_std_error(inode->i_sb, PTR_ERR(result));
        return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room and the transaction must be restarted, we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
        if (!ext4_handle_valid(handle))
                return 0;
        if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
                return 0;
        if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
                return 0;
        return 1;
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
                                int nblocks)
{
        int ret;

        /*
         * Drop i_data_sem to avoid deadlock with ext4_get_blocks.  At this
         * moment, get_block can be called only for blocks inside i_size since
         * page cache has been already dropped and writes are blocked by
         * i_mutex. So we can safely drop the i_data_sem here.
         */
        BUG_ON(EXT4_JOURNAL(inode) == NULL);
        jbd_debug(2, "restarting handle %p\n", handle);
        up_write(&EXT4_I(inode)->i_data_sem);
        ret = ext4_journal_restart(handle, blocks_for_truncate(inode));
        down_write(&EXT4_I(inode)->i_data_sem);
        ext4_discard_preallocations(inode);

        return ret;
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_delete_inode(struct inode *inode)
{
        handle_t *handle;
        int err;

        if (!is_bad_inode(inode))
                dquot_initialize(inode);

        if (ext4_should_order_data(inode))
                ext4_begin_ordered_truncate(inode, 0);
        truncate_inode_pages(&inode->i_data, 0);

        if (is_bad_inode(inode))
                goto no_delete;

        handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
        if (IS_ERR(handle)) {
                ext4_std_error(inode->i_sb, PTR_ERR(handle));
                /*
                 * If we're going to skip the normal cleanup, we still need to
                 * make sure that the in-core orphan linked list is properly
                 * cleaned up.
                 */
                ext4_orphan_del(NULL, inode);
                goto no_delete;
        }

        if (IS_SYNC(inode))
                ext4_handle_sync(handle);
        inode->i_size = 0;
        err = ext4_mark_inode_dirty(handle, inode);
        if (err) {
                ext4_warning(inode->i_sb,
                             "couldn't mark inode dirty (err %d)", err);
                goto stop_handle;
        }
        if (inode->i_blocks)
                ext4_truncate(inode);

        /*
         * ext4_ext_truncate() doesn't reserve any slop when it
         * restarts journal transactions; therefore there may not be
         * enough credits left in the handle to remove the inode from
         * the orphan list and set the dtime field.
         */
        if (!ext4_handle_has_enough_credits(handle, 3)) {
                err = ext4_journal_extend(handle, 3);
                if (err > 0)
                        err = ext4_journal_restart(handle, 3);
                if (err != 0) {
                        ext4_warning(inode->i_sb,
                                     "couldn't extend journal (err %d)", err);
                stop_handle:
                        ext4_journal_stop(handle);
                        goto no_delete;
                }
        }

        /*
         * Kill off the orphan record which ext4_truncate created.
         * AKPM: I think this can be inside the above `if'.
         * Note that ext4_orphan_del() has to be able to cope with the
         * deletion of a non-existent orphan - this is because we don't
         * know if ext4_truncate() actually created an orphan record.
         * (Well, we could do this if we need to, but heck - it works)
         */
        ext4_orphan_del(handle, inode);
        EXT4_I(inode)->i_dtime = get_seconds();

        /*
         * One subtle ordering requirement: if anything has gone wrong
         * (transaction abort, IO errors, whatever), then we can still
         * do these next steps (the fs will already have been marked as
         * having errors), but we can't free the inode if the mark_dirty
         * fails.
         */
        if (ext4_mark_inode_dirty(handle, inode))
                /* If that failed, just do the required in-core inode clear. */
                clear_inode(inode);
        else
                ext4_free_inode(handle, inode);
        ext4_journal_stop(handle);
        return;
no_delete:
        clear_inode(inode);     /* We must guarantee clearing of inode... */
}

typedef struct {
        __le32  *p;
        __le32  key;
        struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
        p->key = *(p->p = v);
        p->bh = bh;
}

/**
 * ext4_block_to_path - parse the block number into array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: set this non-zero if the referred-to block is likely to be
 *      followed (on disk) by an indirect block.
 *
 * To store the locations of a file's data ext4 uses a data structure common
 * for UNIX filesystems - a tree of pointers anchored in the inode, with
 * data blocks at leaves and indirect blocks in intermediate nodes.
 * This function translates the block number into a path in that tree -
 * the return value is the path length and @offsets[n] is the offset of the
 * pointer to the (n+1)th node in the nth one.  If @i_block is out of range
 * (negative or too large) a warning is printed and zero returned.
 *
 * Note: function doesn't find node addresses, so no IO is needed.  All
 * we need to know is the capacity of indirect blocks (taken from the
 * inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks. We might use long long, but that would
 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */
static int ext4_block_to_path(struct inode *inode,
                              ext4_lblk_t i_block,
                              ext4_lblk_t offsets[4], int *boundary)
{
        int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
        int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
        const long direct_blocks = EXT4_NDIR_BLOCKS,
                indirect_blocks = ptrs,
                double_blocks = (1 << (ptrs_bits * 2));
        int n = 0;
        int final = 0;

        if (i_block < direct_blocks) {
                offsets[n++] = i_block;
                final = direct_blocks;
        } else if ((i_block -= direct_blocks) < indirect_blocks) {
                offsets[n++] = EXT4_IND_BLOCK;
                offsets[n++] = i_block;
                final = ptrs;
        } else if ((i_block -= indirect_blocks) < double_blocks) {
                offsets[n++] = EXT4_DIND_BLOCK;
                offsets[n++] = i_block >> ptrs_bits;
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
                offsets[n++] = EXT4_TIND_BLOCK;
                offsets[n++] = i_block >> (ptrs_bits * 2);
                offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else {
                ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
                             i_block + direct_blocks +
                             indirect_blocks + double_blocks, inode->i_ino);
        }
        if (boundary)
                *boundary = final - 1 - (i_block & (ptrs - 1));
        return n;
}
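
/*
 * Worked example (illustrative, assuming a 4 KiB block size, i.e.
 * ptrs == 1024 and EXT4_NDIR_BLOCKS == 12):
 *   i_block = 5    -> offsets = { 5 },                       depth 1 (direct)
 *   i_block = 12   -> offsets = { EXT4_IND_BLOCK, 0 },       depth 2
 *   i_block = 5000 -> 5000 - 12 - 1024 = 3964, so
 *                     offsets = { EXT4_DIND_BLOCK, 3, 892 }, depth 3
 * and *boundary reports how many more pointers fit into the last indirect
 * block before the next one would be needed.
 */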

static int __ext4_check_blockref(const char *function, struct inode *inode,
                                 __le32 *p, unsigned int max)
{
        __le32 *bref = p;
        unsigned int blk;

        while (bref < p+max) {
                blk = le32_to_cpu(*bref++);
                if (blk &&
                    unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
                                                    blk, 1))) {
                        __ext4_error(inode->i_sb, function,
                                     "invalid block reference %u "
                                     "in inode #%lu", blk, inode->i_ino);
                        return -EIO;
                }
        }
        return 0;
}


#define ext4_check_indirect_blockref(inode, bh)                         \
        __ext4_check_blockref(__func__, inode, (__le32 *)(bh)->b_data,  \
                              EXT4_ADDR_PER_BLOCK((inode)->i_sb))

#define ext4_check_inode_blockref(inode)                                \
        __ext4_check_blockref(__func__, inode, EXT4_I(inode)->i_data,   \
                              EXT4_NDIR_BLOCKS)

/**
 * ext4_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * Function fills the array of triples <key, p, bh> and returns %NULL
 * if everything went OK or the pointer to the last filled triple
 * (incomplete one) otherwise. Upon the return chain[i].key contains
 * the number of (i+1)-th block in the chain (as it is stored in memory,
 * i.e. little-endian 32-bit), chain[i].p contains the address of that
 * number (it points into struct inode for i==0 and into the bh->b_data
 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 * block for i>0 and NULL for i==0. In other words, it holds the block
 * numbers of the chain, addresses they were taken from (and where we can
 * verify that chain did not change) and buffer_heads hosting these
 * numbers.
 *
 * Function stops when it stumbles upon zero pointer (absent block)
 *      (pointer to last triple returned, *@err == 0)
 * or when it gets an IO error reading an indirect block
 *      (ditto, *@err == -EIO)
 * or when it reads all @depth-1 indirect blocks successfully and finds
 * the whole chain, all the way to the data (returns %NULL, *err == 0).
 *
 * Need to be called with
 * down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
                                 ext4_lblk_t *offsets,
                                 Indirect chain[4], int *err)
{
        struct super_block *sb = inode->i_sb;
        Indirect *p = chain;
        struct buffer_head *bh;

        *err = 0;
        /* i_data is not going away, no lock needed */
        add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
        if (!p->key)
                goto no_block;
        while (--depth) {
                bh = sb_getblk(sb, le32_to_cpu(p->key));
                if (unlikely(!bh))
                        goto failure;

                if (!bh_uptodate_or_lock(bh)) {
                        if (bh_submit_read(bh) < 0) {
                                put_bh(bh);
                                goto failure;
                        }
                        /* validate block references */
                        if (ext4_check_indirect_blockref(inode, bh)) {
                                put_bh(bh);
                                goto failure;
                        }
                }

                add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
                /* Reader: end */
                if (!p->key)
                        goto no_block;
        }
        return NULL;

failure:
        *err = -EIO;
no_block:
        return p;
}
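
/*
 * Illustrative sketch (not from the original source): for a depth-2 path
 * such as { EXT4_IND_BLOCK, 5 }, a fully mapped chain looks like
 *   chain[0].p -> &EXT4_I(inode)->i_data[EXT4_IND_BLOCK]   (chain[0].bh == NULL)
 *   chain[1].p -> (__le32 *)bh->b_data + 5                 (chain[1].bh == indirect block)
 * and ext4_get_branch() returns NULL; if chain[1].key is zero instead, the
 * pointer to that last, incomplete triple is returned with *err == 0.
 */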

/**
 * ext4_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of indirect block.
 *
 * This function returns the preferred place for block allocation.
 * It is used when the heuristic for sequential allocation fails.
 * Rules are:
 *   + if there is a block to the left of our position - allocate near it.
 *   + if pointer will live in indirect block - allocate near that block.
 *   + if pointer will live in inode - allocate in the same
 *     cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.  The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 * Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
        __le32 *p;
        ext4_fsblk_t bg_start;
        ext4_fsblk_t last_block;
        ext4_grpblk_t colour;
        ext4_group_t block_group;
        int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));

        /* Try to find previous block */
        for (p = ind->p - 1; p >= start; p--) {
                if (*p)
                        return le32_to_cpu(*p);
        }

        /* No such thing, so let's try location of indirect block */
        if (ind->bh)
                return ind->bh->b_blocknr;

        /*
         * It is going to be referred to from the inode itself? OK, just put it
         * into the same cylinder group then.
         */
        block_group = ei->i_block_group;
        if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
                block_group &= ~(flex_size-1);
                if (S_ISREG(inode->i_mode))
                        block_group++;
        }
        bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
        last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

        /*
         * If we are doing delayed allocation, we don't need to take
         * colour into account.
         */
        if (test_opt(inode->i_sb, DELALLOC))
                return bg_start;

        if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
                colour = (current->pid % 16) *
                        (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        else
                colour = (current->pid % 16) * ((last_block - bg_start) / 16);
        return bg_start + colour;
}
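
/*
 * Illustrative example (assuming 32768 blocks per group, no flex_bg
 * grouping and delalloc not in use): a process with pid % 16 == 5 gets
 *   colour = 5 * (32768 / 16) = 10240
 * so its allocations start 10240 blocks into the group, keeping
 * concurrent writers from different processes apart on disk.
 */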

/**
 * ext4_find_goal - find a preferred place for allocation.
 * @inode: owner
 * @block:  block we want
 * @partial: pointer to the last triple within a chain
 *
 * Normally this function finds the preferred place for block allocation
 * and returns it.
 * Because this is only used for non-extent files, we limit the block nr
 * to 32 bits.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
                                   Indirect *partial)
{
        ext4_fsblk_t goal;

        /*
         * XXX need to get goal block from mballoc's data structures
         */

        goal = ext4_find_near(inode, partial);
        goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
        return goal;
}

/**
 * ext4_blks_to_allocate: Look up the block map and count the number
 *      of direct blocks that need to be allocated for the given branch.
 *
 * @branch: chain of indirect blocks
 * @k: number of blocks needed for indirect blocks
 * @blks: number of data blocks to be mapped.
 * @blocks_to_boundary: the offset in the indirect block
 *
 * return the total number of blocks to be allocated, including the
 * direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
                                 int blocks_to_boundary)
{
        unsigned int count = 0;

        /*
         * Simple case: the [td]indirect block(s) have not been allocated
         * yet, so it is clear that the blocks on that path have not been
         * allocated either.
         */
        if (k > 0) {
                /* right now we don't handle cross boundary allocation */
                if (blks < blocks_to_boundary + 1)
                        count += blks;
                else
                        count += blocks_to_boundary + 1;
                return count;
        }

        count++;
        while (count < blks && count <= blocks_to_boundary &&
               le32_to_cpu(*(branch[0].p + count)) == 0) {
                count++;
        }
        return count;
}
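
/*
 * Illustrative example (not from the original source): with k == 0
 * (all needed indirect blocks already exist), blks == 8 and the three
 * pointers following branch[0].p still zero before a mapped block,
 * the loop above stops at count == 4; with k > 0, blks == 4 and
 * blocks_to_boundary == 10, the function simply returns 4.
 */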

/**
 * ext4_alloc_blocks: allocate the multiple blocks needed for a branch
 * @indirect_blks: the number of blocks that need to be allocated for the
 *      indirect blocks
 *
 * @new_blocks: on return it will store the new block numbers for
 *      the indirect blocks (if needed) and the first direct block,
 * @blks: on return it will store the total number of allocated
 *      direct blocks
 */
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
                             ext4_lblk_t iblock, ext4_fsblk_t goal,
                             int indirect_blks, int blks,
                             ext4_fsblk_t new_blocks[4], int *err)
{
        struct ext4_allocation_request ar;
        int target, i;
        unsigned long count = 0, blk_allocated = 0;
        int index = 0;
        ext4_fsblk_t current_block = 0;
        int ret = 0;

        /*
         * Here we try to allocate the requested multiple blocks at once,
         * on a best-effort basis.
         * To build a branch, we should allocate blocks for
         * the indirect blocks (if not allocated yet), and at least
         * the first direct block of this branch.  That's the
         * minimum number of blocks we need to allocate (required).
         */
        /* first we try to allocate the indirect blocks */
        target = indirect_blks;
        while (target > 0) {
                count = target;
                /* allocating blocks for indirect blocks and direct blocks */
                current_block = ext4_new_meta_blocks(handle, inode,
                                                     goal, &count, err);
                if (*err)
                        goto failed_out;

                if (unlikely(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS)) {
                        EXT4_ERROR_INODE(inode,
                                         "current_block %llu + count %lu > %d!",
                                         current_block, count,
                                         EXT4_MAX_BLOCK_FILE_PHYS);
                        *err = -EIO;
                        goto failed_out;
                }

                target -= count;
                /* allocate blocks for indirect blocks */
                while (index < indirect_blks && count) {
                        new_blocks[index++] = current_block++;
                        count--;
                }
                if (count > 0) {
                        /*
                         * save the new block number
                         * for the first direct block
                         */
                        new_blocks[index] = current_block;
                        printk(KERN_INFO "%s returned more blocks than "
                               "requested\n", __func__);
                        WARN_ON(1);
                        break;
                }
        }

        target = blks - count;
        blk_allocated = count;
        if (!target)
                goto allocated;
        /* Now allocate data blocks */
        memset(&ar, 0, sizeof(ar));
        ar.inode = inode;
        ar.goal = goal;
        ar.len = target;
        ar.logical = iblock;
        if (S_ISREG(inode->i_mode))
                /* enable in-core preallocation only for regular files */
                ar.flags = EXT4_MB_HINT_DATA;

        current_block = ext4_mb_new_blocks(handle, &ar, err);
        if (unlikely(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS)) {
                EXT4_ERROR_INODE(inode,
                                 "current_block %llu + ar.len %d > %d!",
                                 current_block, ar.len,
                                 EXT4_MAX_BLOCK_FILE_PHYS);
                *err = -EIO;
                goto failed_out;
        }

        if (*err && (target == blks)) {
                /*
                 * if the allocation failed and we didn't allocate
                 * any blocks before
                 */
                goto failed_out;
        }
        if (!*err) {
                if (target == blks) {
                        /*
                         * save the new block number
                         * for the first direct block
                         */
                        new_blocks[index] = current_block;
                }
                blk_allocated += ar.len;
        }
allocated:
        /* total number of blocks allocated for direct blocks */
        ret = blk_allocated;
        *err = 0;
        return ret;
failed_out:
        for (i = 0; i < index; i++)
                ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);
        return ret;
}
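
/*
 * Illustrative example (not from the original source): for
 * indirect_blks == 2 and blks == 4, the loop above first satisfies the
 * two metadata blocks via ext4_new_meta_blocks() (possibly in a single
 * contiguous chunk), then ext4_mb_new_blocks() is asked for the remaining
 * data blocks; the return value is the number of data blocks actually
 * allocated, with new_blocks[] holding the two indirect block numbers
 * followed by the first data block number.
 */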

/**
 * ext4_alloc_branch - allocate and set up a chain of blocks.
 * @inode: owner
 * @indirect_blks: number of allocated indirect blocks
 * @blks: number of allocated direct blocks
 * @offsets: offsets (in the blocks) to store the pointers to next.
 * @branch: place to store the chain in.
 *
 * This function allocates blocks, zeroes out all but the last one,
 * links them into chain and (if we are synchronous) writes them to disk.
 * In other words, it prepares a branch that can be spliced onto the
 * inode. It stores the information about that chain in the branch[], in
 * the same format as ext4_get_branch() would do. We are calling it after
 * we had read the existing part of chain and partial points to the last
 * triple of that (one with zero ->key). Upon the exit we have the same
 * picture as after the successful ext4_get_block(), except that in one
 * place chain is disconnected - *branch->p is still zero (we did not
 * set the last link), but branch->key contains the number that should
 * be placed into *branch->p to fill that gap.
 *
 * If allocation fails we free all blocks we've allocated (and forget
 * their buffer_heads) and return the error value from the failed
 * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 * as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
                             ext4_lblk_t iblock, int indirect_blks,
                             int *blks, ext4_fsblk_t goal,
                             ext4_lblk_t *offsets, Indirect *branch)
{
        int blocksize = inode->i_sb->s_blocksize;
        int i, n = 0;
        int err = 0;
        struct buffer_head *bh;
        int num;
        ext4_fsblk_t new_blocks[4];
        ext4_fsblk_t current_block;

        num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
                                *blks, new_blocks, &err);
        if (err)
                return err;

        branch[0].key = cpu_to_le32(new_blocks[0]);
        /*
         * metadata blocks and data blocks are allocated.
         */
        for (n = 1; n <= indirect_blks; n++) {
                /*
                 * Get buffer_head for parent block, zero it out
                 * and set the pointer to new one, then send
                 * parent to disk.
                 */
                bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
                branch[n].bh = bh;
                lock_buffer(bh);
                BUFFER_TRACE(bh, "call get_create_access");
                err = ext4_journal_get_create_access(handle, bh);
                if (err) {
                        /* Don't brelse(bh) here; it's done in
                         * ext4_journal_forget() below */
                        unlock_buffer(bh);
                        goto failed;
                }

                memset(bh->b_data, 0, blocksize);
                branch[n].p = (__le32 *) bh->b_data + offsets[n];
                branch[n].key = cpu_to_le32(new_blocks[n]);
                *branch[n].p = branch[n].key;
                if (n == indirect_blks) {
                        current_block = new_blocks[n];
                        /*
                         * End of chain, update the last new metablock of
                         * the chain to point to the new allocated
                         * data blocks numbers
                         */
                        for (i = 1; i < num; i++)
                                *(branch[n].p + i) = cpu_to_le32(++current_block);
                }
                BUFFER_TRACE(bh, "marking uptodate");
                set_buffer_uptodate(bh);
                unlock_buffer(bh);

                BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
                err = ext4_handle_dirty_metadata(handle, inode, bh);
                if (err)
                        goto failed;
        }
        *blks = num;
        return err;
failed:
        /* Allocation failed, free what we already allocated */
        ext4_free_blocks(handle, inode, 0, new_blocks[0], 1, 0);
        for (i = 1; i <= n; i++) {
                /*
                 * branch[i].bh is newly allocated, so there is no
                 * need to revoke the block, which is why we don't
                 * need to set EXT4_FREE_BLOCKS_METADATA.
                 */
                ext4_free_blocks(handle, inode, 0, new_blocks[i], 1,
                                 EXT4_FREE_BLOCKS_FORGET);
        }
        for (i = n+1; i < indirect_blks; i++)
                ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);

        ext4_free_blocks(handle, inode, 0, new_blocks[i], num, 0);

        return err;
}
|  | 802 |  | 
|  | 803 | /** | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 804 | * ext4_splice_branch - splice the allocated branch onto inode. | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 805 | * @inode: owner | 
|  | 806 | * @block: (logical) number of block we are adding | 
|  | 807 | * @chain: chain of indirect blocks (with a missing link - see | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 808 | *	ext4_alloc_branch) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 809 | * @where: location of missing link | 
|  | 810 | * @num:   number of indirect blocks we are adding | 
|  | 811 | * @blks:  number of direct blocks we are adding | 
|  | 812 | * | 
|  | 813 | * This function fills the missing link and does all housekeeping needed in | 
|  | 814 | * inode (->i_blocks, etc.). In case of success we end up with the full | 
|  | 815 | * chain to new block and return 0. | 
|  | 816 | */ | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 817 | static int ext4_splice_branch(handle_t *handle, struct inode *inode, | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 818 | ext4_lblk_t block, Indirect *where, int num, | 
|  | 819 | int blks) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 820 | { | 
|  | 821 | int i; | 
|  | 822 | int err = 0; | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 823 | ext4_fsblk_t current_block; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 824 |  | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 825 | /* | 
|  | 826 | * If we're splicing into a [td]indirect block (as opposed to the | 
|  | 827 | * inode) then we need to get write access to the [td]indirect block | 
|  | 828 | * before the splice. | 
|  | 829 | */ | 
|  | 830 | if (where->bh) { | 
|  | 831 | BUFFER_TRACE(where->bh, "get_write_access"); | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 832 | err = ext4_journal_get_write_access(handle, where->bh); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 833 | if (err) | 
|  | 834 | goto err_out; | 
|  | 835 | } | 
|  | 836 | /* That's it */ | 
|  | 837 |  | 
|  | 838 | *where->p = where->key; | 
|  | 839 |  | 
|  | 840 | /* | 
|  | 841 | * Update the host buffer_head or inode to point to more just allocated | 
|  | 842 | * direct blocks blocks | 
|  | 843 | */ | 
|  | 844 | if (num == 0 && blks > 1) { | 
|  | 845 | current_block = le32_to_cpu(where->key) + 1; | 
|  | 846 | for (i = 1; i < blks; i++) | 
| Theodore Ts'o | af5bc92 | 2008-09-08 22:25:24 -0400 | [diff] [blame] | 847 | *(where->p + i) = cpu_to_le32(current_block++); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 848 | } | 
|  | 849 |  | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 850 | /* We are done with atomic stuff, now do the rest of housekeeping */ | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 851 | /* had we spliced it onto indirect block? */ | 
|  | 852 | if (where->bh) { | 
|  | 853 | /* | 
|  | 854 | * If we spliced it onto an indirect block, we haven't | 
|  | 855 | * altered the inode.  Note however that if it is being spliced | 
|  | 856 | * onto an indirect block at the very end of the file (the | 
|  | 857 | * file is growing) then we *will* alter the inode to reflect | 
|  | 858 | * the new i_size.  But that is not done here - it is done in | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 859 | * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode. | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 860 | */ | 
|  | 861 | jbd_debug(5, "splicing indirect only\n"); | 
| Frank Mayhar | 0390131 | 2009-01-07 00:06:22 -0500 | [diff] [blame] | 862 | BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata"); | 
|  | 863 | err = ext4_handle_dirty_metadata(handle, inode, where->bh); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 864 | if (err) | 
|  | 865 | goto err_out; | 
|  | 866 | } else { | 
|  | 867 | /* | 
|  | 868 | * OK, we spliced it into the inode itself on a direct block. | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 869 | */ | 
| Theodore Ts'o | 4159175 | 2009-06-15 03:41:23 -0400 | [diff] [blame] | 870 | ext4_mark_inode_dirty(handle, inode); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 871 | jbd_debug(5, "splicing direct\n"); | 
|  | 872 | } | 
|  | 873 | return err; | 
|  | 874 |  | 
|  | 875 | err_out: | 
|  | 876 | for (i = 1; i <= num; i++) { | 
| Theodore Ts'o | b7e57e7 | 2009-11-22 21:00:13 -0500 | [diff] [blame] | 877 | /* | 
| Theodore Ts'o | e636260 | 2009-11-23 07:17:05 -0500 | [diff] [blame] | 878 | * branch[i].bh is newly allocated, so there is no | 
|  | 879 | * need to revoke the block, which is why we don't | 
|  | 880 | * need to set EXT4_FREE_BLOCKS_METADATA. | 
| Theodore Ts'o | b7e57e7 | 2009-11-22 21:00:13 -0500 | [diff] [blame] | 881 | */ | 
| Theodore Ts'o | e636260 | 2009-11-23 07:17:05 -0500 | [diff] [blame] | 882 | ext4_free_blocks(handle, inode, where[i].bh, 0, 1, | 
|  | 883 | EXT4_FREE_BLOCKS_FORGET); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 884 | } | 
| Theodore Ts'o | e636260 | 2009-11-23 07:17:05 -0500 | [diff] [blame] | 885 | ext4_free_blocks(handle, inode, 0, le32_to_cpu(where[num].key), | 
|  | 886 | blks, 0); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 887 |  | 
|  | 888 | return err; | 
|  | 889 | } | 
|  | 890 |  | 
|  | 891 | /* | 
| Theodore Ts'o | b920c75 | 2009-05-14 00:54:29 -0400 | [diff] [blame] | 892 | * The ext4_ind_get_blocks() function handles non-extent inodes | 
|  | 893 | * (i.e., using the traditional indirect/double-indirect i_blocks | 
|  | 894 | * scheme) for ext4_get_blocks(). | 
|  | 895 | * | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 896 | * The allocation strategy is simple: if we have to allocate something, we | 
|  | 897 | * will have to go the whole way down to the leaf. So let's do it before | 
|  | 898 | * attaching anything to the tree, set linkage between the newborn blocks, | 
|  | 899 | * write them if sync is required, recheck the path, free and repeat if the | 
|  | 900 | * check fails, otherwise set the last missing link (that will protect us | 
|  | 901 | * from any truncate-generated removals - all blocks on the path are immune | 
|  | 902 | * now) and possibly force the write on the parent block. | 
|  | 903 | * That has a nice additional property: no special recovery from the failed | 
|  | 904 | * allocations is needed - we simply release blocks and do not touch anything | 
|  | 905 | * reachable from inode. | 
|  | 906 | * | 
|  | 907 | * `handle' can be NULL if create == 0. | 
|  | 908 | * | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 909 | * return > 0, # of blocks mapped or allocated. | 
|  | 910 | * return = 0, if plain lookup failed. | 
|  | 911 | * return < 0, error case. | 
| Aneesh Kumar K.V | c278bfe | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 912 | * | 
| Theodore Ts'o | b920c75 | 2009-05-14 00:54:29 -0400 | [diff] [blame] | 913 | * The ext4_ind_get_blocks() function should be called with | 
|  | 914 | * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem | 
|  | 915 | * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or | 
|  | 916 | * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system | 
|  | 917 | * blocks. | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 918 | */ | 
| Theodore Ts'o | e4d996c | 2009-05-12 00:25:28 -0400 | [diff] [blame] | 919 | static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode, | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 920 | ext4_lblk_t iblock, unsigned int maxblocks, | 
|  | 921 | struct buffer_head *bh_result, | 
|  | 922 | int flags) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 923 | { | 
|  | 924 | int err = -EIO; | 
| Aneesh Kumar K.V | 725d26d | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 925 | ext4_lblk_t offsets[4]; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 926 | Indirect chain[4]; | 
|  | 927 | Indirect *partial; | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 928 | ext4_fsblk_t goal; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 929 | int indirect_blks; | 
|  | 930 | int blocks_to_boundary = 0; | 
|  | 931 | int depth; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 932 | int count = 0; | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 933 | ext4_fsblk_t first_block = 0; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 934 |  | 
| Alex Tomas | a86c618 | 2006-10-11 01:21:03 -0700 | [diff] [blame] | 935 | J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)); | 
| Theodore Ts'o | c217705 | 2009-05-14 00:58:52 -0400 | [diff] [blame] | 936 | J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0); | 
| Aneesh Kumar K.V | 725d26d | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 937 | depth = ext4_block_to_path(inode, iblock, offsets, | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 938 | &blocks_to_boundary); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 939 |  | 
|  | 940 | if (depth == 0) | 
|  | 941 | goto out; | 
|  | 942 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 943 | partial = ext4_get_branch(inode, depth, offsets, chain, &err); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 944 |  | 
|  | 945 | /* Simplest case - block found, no allocation needed */ | 
|  | 946 | if (!partial) { | 
|  | 947 | first_block = le32_to_cpu(chain[depth - 1].key); | 
|  | 948 | clear_buffer_new(bh_result); | 
|  | 949 | count++; | 
|  | 950 | /* map more blocks */ | 
|  | 951 | while (count < maxblocks && count <= blocks_to_boundary) { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 952 | ext4_fsblk_t blk; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 953 |  | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 954 | blk = le32_to_cpu(*(chain[depth-1].p + count)); | 
|  | 955 |  | 
|  | 956 | if (blk == first_block + count) | 
|  | 957 | count++; | 
|  | 958 | else | 
|  | 959 | break; | 
|  | 960 | } | 
| Aneesh Kumar K.V | c278bfe | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 961 | goto got_it; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 962 | } | 
|  | 963 |  | 
|  | 964 | /* Next simple case - plain lookup or failed read of indirect block */ | 
| Theodore Ts'o | c217705 | 2009-05-14 00:58:52 -0400 | [diff] [blame] | 965 | if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 966 | goto cleanup; | 
|  | 967 |  | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 968 | /* | 
| Theodore Ts'o | c2ea3fd | 2008-10-10 09:40:52 -0400 | [diff] [blame] | 969 | * Okay, we need to do block allocation. | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 970 | */ | 
| Akinobu Mita | fb01bfd | 2008-02-06 01:40:16 -0800 | [diff] [blame] | 971 | goal = ext4_find_goal(inode, iblock, partial); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 972 |  | 
|  | 973 | /* the number of blocks we need to allocate for [d,t]indirect blocks */ | 
|  | 974 | indirect_blks = (chain + depth) - partial - 1; | 
|  | 975 |  | 
|  | 976 | /* | 
|  | 977 | * Next look up the indirect map to count the total number of | 
|  | 978 | * direct blocks to allocate for this branch. | 
|  | 979 | */ | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 980 | count = ext4_blks_to_allocate(partial, indirect_blks, | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 981 | maxblocks, blocks_to_boundary); | 
|  | 982 | /* | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 983 | * Block out ext4_truncate while we alter the tree | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 984 | */ | 
| Aneesh Kumar K.V | 7061eba | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 985 | err = ext4_alloc_branch(handle, inode, iblock, indirect_blks, | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 986 | &count, goal, | 
|  | 987 | offsets + (partial - chain), partial); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 988 |  | 
|  | 989 | /* | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 990 | * The ext4_splice_branch call will free and forget any buffers | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 991 | * on the new chain if there is a failure, but that risks using | 
|  | 992 | * up transaction credits, especially for bitmaps where the | 
|  | 993 | * credits cannot be returned.  Can we handle this somehow?  We | 
|  | 994 | * may need to return -EAGAIN upwards in the worst case.  --sct | 
|  | 995 | */ | 
|  | 996 | if (!err) | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 997 | err = ext4_splice_branch(handle, inode, iblock, | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 998 | partial, indirect_blks, count); | 
| Jan Kara | 2bba702 | 2009-11-23 07:24:48 -0500 | [diff] [blame] | 999 | if (err) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1000 | goto cleanup; | 
|  | 1001 |  | 
|  | 1002 | set_buffer_new(bh_result); | 
| Jan Kara | b436b9b | 2009-12-08 23:51:10 -0500 | [diff] [blame] | 1003 |  | 
|  | 1004 | ext4_update_inode_fsync_trans(handle, inode, 1); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1005 | got_it: | 
|  | 1006 | map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key)); | 
|  | 1007 | if (count > blocks_to_boundary) | 
|  | 1008 | set_buffer_boundary(bh_result); | 
|  | 1009 | err = count; | 
|  | 1010 | /* Clean up and exit */ | 
|  | 1011 | partial = chain + depth - 1;	/* the whole chain */ | 
|  | 1012 | cleanup: | 
|  | 1013 | while (partial > chain) { | 
|  | 1014 | BUFFER_TRACE(partial->bh, "call brelse"); | 
|  | 1015 | brelse(partial->bh); | 
|  | 1016 | partial--; | 
|  | 1017 | } | 
|  | 1018 | BUFFER_TRACE(bh_result, "returned"); | 
|  | 1019 | out: | 
|  | 1020 | return err; | 
|  | 1021 | } | 
|  | 1022 |  | 
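|  |  | /* | 
|  |  | * Expose the inode's reserved (delayed allocation) quota charge so that | 
|  |  | * the quota code can account for space that has been reserved but not | 
|  |  | * yet allocated on disk. | 
|  |  | */ | 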
| Dmitry Monakhov | a9e7f44 | 2009-12-14 15:21:14 +0300 | [diff] [blame] | 1023 | #ifdef CONFIG_QUOTA | 
|  | 1024 | qsize_t *ext4_get_reserved_space(struct inode *inode) | 
| Mingming Cao | 60e58e0 | 2009-01-22 18:13:05 +0100 | [diff] [blame] | 1025 | { | 
| Dmitry Monakhov | a9e7f44 | 2009-12-14 15:21:14 +0300 | [diff] [blame] | 1026 | return &EXT4_I(inode)->i_reserved_quota; | 
| Mingming Cao | 60e58e0 | 2009-01-22 18:13:05 +0100 | [diff] [blame] | 1027 | } | 
| Dmitry Monakhov | a9e7f44 | 2009-12-14 15:21:14 +0300 | [diff] [blame] | 1028 | #endif | 
| Theodore Ts'o | 9d0be50 | 2010-01-01 02:41:30 -0500 | [diff] [blame] | 1029 |  | 
| Aneesh Kumar K.V | 12219ae | 2008-07-17 16:12:08 -0400 | [diff] [blame] | 1030 | /* | 
|  | 1031 | * Calculate the number of metadata blocks we need to reserve in order | 
| Theodore Ts'o | 9d0be50 | 2010-01-01 02:41:30 -0500 | [diff] [blame] | 1032 | * to allocate a new block at @lblock for a non-extent (indirect-mapped) file | 
| Aneesh Kumar K.V | 12219ae | 2008-07-17 16:12:08 -0400 | [diff] [blame] | 1033 | */ | 
| Theodore Ts'o | 9d0be50 | 2010-01-01 02:41:30 -0500 | [diff] [blame] | 1034 | static int ext4_indirect_calc_metadata_amount(struct inode *inode, | 
|  | 1035 | sector_t lblock) | 
| Aneesh Kumar K.V | 12219ae | 2008-07-17 16:12:08 -0400 | [diff] [blame] | 1036 | { | 
| Theodore Ts'o | 9d0be50 | 2010-01-01 02:41:30 -0500 | [diff] [blame] | 1037 | struct ext4_inode_info *ei = EXT4_I(inode); | 
| Jan Kara | d330a5b | 2010-03-14 18:17:54 -0400 | [diff] [blame] | 1038 | sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1); | 
| Theodore Ts'o | 9d0be50 | 2010-01-01 02:41:30 -0500 | [diff] [blame] | 1039 | int blk_bits; | 
| Aneesh Kumar K.V | 12219ae | 2008-07-17 16:12:08 -0400 | [diff] [blame] | 1040 |  | 
| Theodore Ts'o | 9d0be50 | 2010-01-01 02:41:30 -0500 | [diff] [blame] | 1041 | if (lblock < EXT4_NDIR_BLOCKS) | 
|  | 1042 | return 0; | 
| Aneesh Kumar K.V | 12219ae | 2008-07-17 16:12:08 -0400 | [diff] [blame] | 1043 |  | 
| Theodore Ts'o | 9d0be50 | 2010-01-01 02:41:30 -0500 | [diff] [blame] | 1044 | lblock -= EXT4_NDIR_BLOCKS; | 
| Aneesh Kumar K.V | 12219ae | 2008-07-17 16:12:08 -0400 | [diff] [blame] | 1045 |  | 
| Theodore Ts'o | 9d0be50 | 2010-01-01 02:41:30 -0500 | [diff] [blame] | 1046 | if (ei->i_da_metadata_calc_len && | 
|  | 1047 | (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) { | 
|  | 1048 | ei->i_da_metadata_calc_len++; | 
|  | 1049 | return 0; | 
|  | 1050 | } | 
|  | 1051 | ei->i_da_metadata_calc_last_lblock = lblock & dind_mask; | 
|  | 1052 | ei->i_da_metadata_calc_len = 1; | 
| Jan Kara | d330a5b | 2010-03-14 18:17:54 -0400 | [diff] [blame] | 1053 | blk_bits = order_base_2(lblock); | 
| Theodore Ts'o | 9d0be50 | 2010-01-01 02:41:30 -0500 | [diff] [blame] | 1054 | return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1; | 
| Aneesh Kumar K.V | 12219ae | 2008-07-17 16:12:08 -0400 | [diff] [blame] | 1055 | } | 
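|  |  | /* | 
|  |  | * Rough worked example (assuming a 4KB block size, i.e. | 
|  |  | * EXT4_ADDR_PER_BLOCK_BITS == 10): after subtracting the 12 direct | 
|  |  | * blocks, a remainder of 5 gives order_base_2(5) = 3, so 3/10 + 1 = 1 | 
|  |  | * metadata block (an indirect block); a remainder of 2000 gives | 
|  |  | * 11/10 + 1 = 2 (indirect + double indirect); a remainder of 2^21 gives | 
|  |  | * 21/10 + 1 = 3 (up to the triple indirect block).  This is a | 
|  |  | * deliberately coarse estimate; the i_da_metadata_calc_* fields above | 
|  |  | * keep sequential writes within the same double-indirect region from | 
|  |  | * reserving the same metadata blocks more than once. | 
|  |  | */ | 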
|  | 1056 |  | 
|  | 1057 | /* | 
|  | 1058 | * Calculate the number of metadata blocks we need to reserve | 
| Theodore Ts'o | 9d0be50 | 2010-01-01 02:41:30 -0500 | [diff] [blame] | 1059 | * to allocate a block located at @lblock | 
| Aneesh Kumar K.V | 12219ae | 2008-07-17 16:12:08 -0400 | [diff] [blame] | 1060 | */ | 
| Theodore Ts'o | 9d0be50 | 2010-01-01 02:41:30 -0500 | [diff] [blame] | 1061 | static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock) | 
| Aneesh Kumar K.V | 12219ae | 2008-07-17 16:12:08 -0400 | [diff] [blame] | 1062 | { | 
|  | 1063 | if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) | 
| Theodore Ts'o | 9d0be50 | 2010-01-01 02:41:30 -0500 | [diff] [blame] | 1064 | return ext4_ext_calc_metadata_amount(inode, lblock); | 
| Aneesh Kumar K.V | 12219ae | 2008-07-17 16:12:08 -0400 | [diff] [blame] | 1065 |  | 
| Theodore Ts'o | 9d0be50 | 2010-01-01 02:41:30 -0500 | [diff] [blame] | 1066 | return ext4_indirect_calc_metadata_amount(inode, lblock); | 
| Aneesh Kumar K.V | 12219ae | 2008-07-17 16:12:08 -0400 | [diff] [blame] | 1067 | } | 
|  | 1068 |  | 
| Theodore Ts'o | 0637c6f | 2009-12-30 14:20:45 -0500 | [diff] [blame] | 1069 | /* | 
|  | 1070 | * Called with i_data_sem down, which is important since we can call | 
|  | 1071 | * ext4_discard_preallocations() from here. | 
|  | 1072 | */ | 
| Aneesh Kumar K.V | 5f634d0 | 2010-01-25 04:00:31 -0500 | [diff] [blame] | 1073 | void ext4_da_update_reserve_space(struct inode *inode, | 
|  | 1074 | int used, int quota_claim) | 
| Aneesh Kumar K.V | 12219ae | 2008-07-17 16:12:08 -0400 | [diff] [blame] | 1075 | { | 
|  | 1076 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 
| Theodore Ts'o | 0637c6f | 2009-12-30 14:20:45 -0500 | [diff] [blame] | 1077 | struct ext4_inode_info *ei = EXT4_I(inode); | 
| Aneesh Kumar K.V | 5f634d0 | 2010-01-25 04:00:31 -0500 | [diff] [blame] | 1078 | int mdb_free = 0, allocated_meta_blocks = 0; | 
| Aneesh Kumar K.V | 12219ae | 2008-07-17 16:12:08 -0400 | [diff] [blame] | 1079 |  | 
| Theodore Ts'o | 0637c6f | 2009-12-30 14:20:45 -0500 | [diff] [blame] | 1080 | spin_lock(&ei->i_block_reservation_lock); | 
| Theodore Ts'o | f8ec9d6 | 2010-01-01 01:00:21 -0500 | [diff] [blame] | 1081 | trace_ext4_da_update_reserve_space(inode, used); | 
| Theodore Ts'o | 0637c6f | 2009-12-30 14:20:45 -0500 | [diff] [blame] | 1082 | if (unlikely(used > ei->i_reserved_data_blocks)) { | 
|  | 1083 | ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d " | 
|  | 1084 | "with only %d reserved data blocks\n", | 
|  | 1085 | __func__, inode->i_ino, used, | 
|  | 1086 | ei->i_reserved_data_blocks); | 
|  | 1087 | WARN_ON(1); | 
|  | 1088 | used = ei->i_reserved_data_blocks; | 
| Aneesh Kumar K.V | 6bc6e63 | 2008-10-10 09:39:00 -0400 | [diff] [blame] | 1089 | } | 
| Aneesh Kumar K.V | 12219ae | 2008-07-17 16:12:08 -0400 | [diff] [blame] | 1090 |  | 
| Theodore Ts'o | 0637c6f | 2009-12-30 14:20:45 -0500 | [diff] [blame] | 1091 | /* Update per-inode reservations */ | 
|  | 1092 | ei->i_reserved_data_blocks -= used; | 
|  | 1093 | used += ei->i_allocated_meta_blocks; | 
|  | 1094 | ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks; | 
| Aneesh Kumar K.V | 5f634d0 | 2010-01-25 04:00:31 -0500 | [diff] [blame] | 1095 | allocated_meta_blocks = ei->i_allocated_meta_blocks; | 
| Theodore Ts'o | 0637c6f | 2009-12-30 14:20:45 -0500 | [diff] [blame] | 1096 | ei->i_allocated_meta_blocks = 0; | 
|  | 1097 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, used); | 
|  | 1098 |  | 
|  | 1099 | if (ei->i_reserved_data_blocks == 0) { | 
|  | 1100 | /* | 
|  | 1101 | * We can release all of the reserved metadata blocks | 
|  | 1102 | * only when we have written all of the delayed | 
|  | 1103 | * allocation blocks. | 
|  | 1104 | */ | 
| Theodore Ts'o | ee5f4d9 | 2010-01-01 02:36:15 -0500 | [diff] [blame] | 1105 | mdb_free = ei->i_reserved_meta_blocks; | 
|  | 1106 | ei->i_reserved_meta_blocks = 0; | 
| Theodore Ts'o | 9d0be50 | 2010-01-01 02:41:30 -0500 | [diff] [blame] | 1107 | ei->i_da_metadata_calc_len = 0; | 
| Theodore Ts'o | 0637c6f | 2009-12-30 14:20:45 -0500 | [diff] [blame] | 1108 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free); | 
| Theodore Ts'o | 0637c6f | 2009-12-30 14:20:45 -0500 | [diff] [blame] | 1109 | } | 
| Aneesh Kumar K.V | 12219ae | 2008-07-17 16:12:08 -0400 | [diff] [blame] | 1110 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | 
| Mingming Cao | 60e58e0 | 2009-01-22 18:13:05 +0100 | [diff] [blame] | 1111 |  | 
| Theodore Ts'o | 0637c6f | 2009-12-30 14:20:45 -0500 | [diff] [blame] | 1112 | /* Update quota subsystem */ | 
| Aneesh Kumar K.V | 5f634d0 | 2010-01-25 04:00:31 -0500 | [diff] [blame] | 1113 | if (quota_claim) { | 
| Christoph Hellwig | 5dd4056 | 2010-03-03 09:05:00 -0500 | [diff] [blame] | 1114 | dquot_claim_block(inode, used); | 
| Aneesh Kumar K.V | 5f634d0 | 2010-01-25 04:00:31 -0500 | [diff] [blame] | 1115 | if (mdb_free) | 
| Christoph Hellwig | 5dd4056 | 2010-03-03 09:05:00 -0500 | [diff] [blame] | 1116 | dquot_release_reservation_block(inode, mdb_free); | 
| Aneesh Kumar K.V | 5f634d0 | 2010-01-25 04:00:31 -0500 | [diff] [blame] | 1117 | } else { | 
|  | 1118 | /* | 
|  | 1119 | * We did fallocate at an offset that was already delayed | 
|  | 1120 | * allocated, so on writeback of that delayed allocation we should | 
|  | 1121 | * not update the quota for the allocated blocks. But then | 
|  | 1122 | * converting an fallocate region to an initialized region would | 
|  | 1123 | * have caused a metadata allocation, so claim quota for | 
|  | 1124 | * that. | 
|  | 1125 | */ | 
|  | 1126 | if (allocated_meta_blocks) | 
| Christoph Hellwig | 5dd4056 | 2010-03-03 09:05:00 -0500 | [diff] [blame] | 1127 | dquot_claim_block(inode, allocated_meta_blocks); | 
|  | 1128 | dquot_release_reservation_block(inode, mdb_free + used); | 
| Aneesh Kumar K.V | 5f634d0 | 2010-01-25 04:00:31 -0500 | [diff] [blame] | 1129 | } | 
| Aneesh Kumar K.V | d601430 | 2009-03-27 22:36:43 -0400 | [diff] [blame] | 1130 |  | 
|  | 1131 | /* | 
|  | 1132 | * If we have done all the pending block allocations and if | 
|  | 1133 | * there aren't any writers on the inode, we can discard the | 
|  | 1134 | * inode's preallocations. | 
|  | 1135 | */ | 
| Theodore Ts'o | 0637c6f | 2009-12-30 14:20:45 -0500 | [diff] [blame] | 1136 | if ((ei->i_reserved_data_blocks == 0) && | 
|  | 1137 | (atomic_read(&inode->i_writecount) == 0)) | 
| Aneesh Kumar K.V | d601430 | 2009-03-27 22:36:43 -0400 | [diff] [blame] | 1138 | ext4_discard_preallocations(inode); | 
| Aneesh Kumar K.V | 12219ae | 2008-07-17 16:12:08 -0400 | [diff] [blame] | 1139 | } | 
|  | 1140 |  | 
| Theodore Ts'o | 80e4246 | 2009-09-08 08:21:26 -0400 | [diff] [blame] | 1141 | static int check_block_validity(struct inode *inode, const char *msg, | 
|  | 1142 | sector_t logical, sector_t phys, int len) | 
| Theodore Ts'o | 6fd058f | 2009-05-17 15:38:01 -0400 | [diff] [blame] | 1143 | { | 
|  | 1144 | if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), phys, len)) { | 
| Eric Sandeen | 12062dd | 2010-02-15 14:19:27 -0500 | [diff] [blame] | 1145 | __ext4_error(inode->i_sb, msg, | 
| Theodore Ts'o | 6fd058f | 2009-05-17 15:38:01 -0400 | [diff] [blame] | 1146 | "inode #%lu logical block %llu mapped to %llu " | 
|  | 1147 | "(size %d)", inode->i_ino, | 
|  | 1148 | (unsigned long long) logical, | 
|  | 1149 | (unsigned long long) phys, len); | 
| Theodore Ts'o | 6fd058f | 2009-05-17 15:38:01 -0400 | [diff] [blame] | 1150 | return -EIO; | 
|  | 1151 | } | 
|  | 1152 | return 0; | 
|  | 1153 | } | 
|  | 1154 |  | 
| Mingming Cao | f5ab0d1 | 2008-02-25 15:29:55 -0500 | [diff] [blame] | 1155 | /* | 
| Theodore Ts'o | 1f94533 | 2009-09-30 22:57:41 -0400 | [diff] [blame] | 1156 | * Return the number of contiguous dirty pages in a given inode | 
|  | 1157 | * starting at page frame idx. | 
| Theodore Ts'o | 55138e0 | 2009-09-29 13:31:31 -0400 | [diff] [blame] | 1158 | */ | 
|  | 1159 | static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx, | 
|  | 1160 | unsigned int max_pages) | 
|  | 1161 | { | 
|  | 1162 | struct address_space *mapping = inode->i_mapping; | 
|  | 1163 | pgoff_t	index; | 
|  | 1164 | struct pagevec pvec; | 
|  | 1165 | pgoff_t num = 0; | 
|  | 1166 | int i, nr_pages, done = 0; | 
|  | 1167 |  | 
|  | 1168 | if (max_pages == 0) | 
|  | 1169 | return 0; | 
|  | 1170 | pagevec_init(&pvec, 0); | 
|  | 1171 | while (!done) { | 
|  | 1172 | index = idx; | 
|  | 1173 | nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, | 
|  | 1174 | PAGECACHE_TAG_DIRTY, | 
|  | 1175 | (pgoff_t)PAGEVEC_SIZE); | 
|  | 1176 | if (nr_pages == 0) | 
|  | 1177 | break; | 
|  | 1178 | for (i = 0; i < nr_pages; i++) { | 
|  | 1179 | struct page *page = pvec.pages[i]; | 
|  | 1180 | struct buffer_head *bh, *head; | 
|  | 1181 |  | 
|  | 1182 | lock_page(page); | 
|  | 1183 | if (unlikely(page->mapping != mapping) || | 
|  | 1184 | !PageDirty(page) || | 
|  | 1185 | PageWriteback(page) || | 
|  | 1186 | page->index != idx) { | 
|  | 1187 | done = 1; | 
|  | 1188 | unlock_page(page); | 
|  | 1189 | break; | 
|  | 1190 | } | 
| Theodore Ts'o | 1f94533 | 2009-09-30 22:57:41 -0400 | [diff] [blame] | 1191 | if (page_has_buffers(page)) { | 
|  | 1192 | bh = head = page_buffers(page); | 
|  | 1193 | do { | 
|  | 1194 | if (!buffer_delay(bh) && | 
|  | 1195 | !buffer_unwritten(bh)) | 
|  | 1196 | done = 1; | 
|  | 1197 | bh = bh->b_this_page; | 
|  | 1198 | } while (!done && (bh != head)); | 
|  | 1199 | } | 
| Theodore Ts'o | 55138e0 | 2009-09-29 13:31:31 -0400 | [diff] [blame] | 1200 | unlock_page(page); | 
|  | 1201 | if (done) | 
|  | 1202 | break; | 
|  | 1203 | idx++; | 
|  | 1204 | num++; | 
|  | 1205 | if (num >= max_pages) | 
|  | 1206 | break; | 
|  | 1207 | } | 
|  | 1208 | pagevec_release(&pvec); | 
|  | 1209 | } | 
|  | 1210 | return num; | 
|  | 1211 | } | 
|  | 1212 |  | 
|  | 1213 | /* | 
| Theodore Ts'o | 12b7ac1 | 2009-05-14 00:57:44 -0400 | [diff] [blame] | 1214 | * The ext4_get_blocks() function tries to look up the requested blocks, | 
| Theodore Ts'o | 2b2d6d0 | 2008-07-26 16:15:44 -0400 | [diff] [blame] | 1215 | * and returns the mapping if the blocks are already mapped. | 
| Mingming Cao | f5ab0d1 | 2008-02-25 15:29:55 -0500 | [diff] [blame] | 1216 | * | 
| Mingming Cao | f5ab0d1 | 2008-02-25 15:29:55 -0500 | [diff] [blame] | 1217 | * Otherwise it takes the write lock of i_data_sem, allocates blocks, | 
|  | 1218 | * stores the allocated blocks in the result buffer head and marks it | 
|  | 1219 | * mapped. | 
|  | 1220 | * | 
|  | 1221 | * If the file is extent based, it will call ext4_ext_get_blocks(); | 
| Theodore Ts'o | e4d996c | 2009-05-12 00:25:28 -0400 | [diff] [blame] | 1222 | * otherwise, it calls ext4_ind_get_blocks() to handle indirect-mapped | 
| Mingming Cao | f5ab0d1 | 2008-02-25 15:29:55 -0500 | [diff] [blame] | 1223 | * files. | 
|  | 1224 | * | 
|  | 1225 | * On success, it returns the number of blocks mapped or allocated. | 
|  | 1226 | * If create == 0 and the blocks are pre-allocated and uninitialized, | 
|  | 1227 | * the result buffer head is unmapped. If create == 1, it will make sure | 
|  | 1228 | * the buffer head is mapped. | 
|  | 1229 | * | 
|  | 1230 | * It returns 0 if a plain lookup failed (blocks have not been allocated); | 
|  | 1231 | * in that case, the buffer head is unmapped. | 
|  | 1232 | * | 
|  | 1233 | * It returns the error in case of allocation failure. | 
|  | 1234 | */ | 
| Theodore Ts'o | 12b7ac1 | 2009-05-14 00:57:44 -0400 | [diff] [blame] | 1235 | int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block, | 
|  | 1236 | unsigned int max_blocks, struct buffer_head *bh, | 
| Theodore Ts'o | c217705 | 2009-05-14 00:58:52 -0400 | [diff] [blame] | 1237 | int flags) | 
| Aneesh Kumar K.V | 0e855ac | 2008-01-28 23:58:26 -0500 | [diff] [blame] | 1238 | { | 
|  | 1239 | int retval; | 
| Mingming Cao | f5ab0d1 | 2008-02-25 15:29:55 -0500 | [diff] [blame] | 1240 |  | 
|  | 1241 | clear_buffer_mapped(bh); | 
| Aneesh Kumar K.V | 2a8964d | 2009-05-14 17:05:39 -0400 | [diff] [blame] | 1242 | clear_buffer_unwritten(bh); | 
| Mingming Cao | f5ab0d1 | 2008-02-25 15:29:55 -0500 | [diff] [blame] | 1243 |  | 
| Mingming Cao | 0031462 | 2009-09-28 15:49:08 -0400 | [diff] [blame] | 1244 | ext_debug("ext4_get_blocks(): inode %lu, flag %d, max_blocks %u," | 
|  | 1245 | "logical block %lu\n", inode->i_ino, flags, max_blocks, | 
|  | 1246 | (unsigned long)block); | 
| Aneesh Kumar K.V | 4df3d26 | 2008-01-28 23:58:29 -0500 | [diff] [blame] | 1247 | /* | 
| Theodore Ts'o | b920c75 | 2009-05-14 00:54:29 -0400 | [diff] [blame] | 1248 | * Try to see if we can get the block without requesting a new | 
|  | 1249 | * file system block. | 
| Aneesh Kumar K.V | 4df3d26 | 2008-01-28 23:58:29 -0500 | [diff] [blame] | 1250 | */ | 
|  | 1251 | down_read((&EXT4_I(inode)->i_data_sem)); | 
|  | 1252 | if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { | 
|  | 1253 | retval =  ext4_ext_get_blocks(handle, inode, block, max_blocks, | 
| Theodore Ts'o | c217705 | 2009-05-14 00:58:52 -0400 | [diff] [blame] | 1254 | bh, 0); | 
| Aneesh Kumar K.V | 0e855ac | 2008-01-28 23:58:26 -0500 | [diff] [blame] | 1255 | } else { | 
| Theodore Ts'o | e4d996c | 2009-05-12 00:25:28 -0400 | [diff] [blame] | 1256 | retval = ext4_ind_get_blocks(handle, inode, block, max_blocks, | 
| Theodore Ts'o | c217705 | 2009-05-14 00:58:52 -0400 | [diff] [blame] | 1257 | bh, 0); | 
| Aneesh Kumar K.V | 0e855ac | 2008-01-28 23:58:26 -0500 | [diff] [blame] | 1258 | } | 
| Aneesh Kumar K.V | 4df3d26 | 2008-01-28 23:58:29 -0500 | [diff] [blame] | 1259 | up_read((&EXT4_I(inode)->i_data_sem)); | 
| Mingming Cao | f5ab0d1 | 2008-02-25 15:29:55 -0500 | [diff] [blame] | 1260 |  | 
| Theodore Ts'o | 6fd058f | 2009-05-17 15:38:01 -0400 | [diff] [blame] | 1261 | if (retval > 0 && buffer_mapped(bh)) { | 
| Theodore Ts'o | 80e4246 | 2009-09-08 08:21:26 -0400 | [diff] [blame] | 1262 | int ret = check_block_validity(inode, "file system corruption", | 
|  | 1263 | block, bh->b_blocknr, retval); | 
| Theodore Ts'o | 6fd058f | 2009-05-17 15:38:01 -0400 | [diff] [blame] | 1264 | if (ret != 0) | 
|  | 1265 | return ret; | 
|  | 1266 | } | 
|  | 1267 |  | 
| Mingming Cao | f5ab0d1 | 2008-02-25 15:29:55 -0500 | [diff] [blame] | 1268 | /* If it is only a block(s) look up */ | 
| Theodore Ts'o | c217705 | 2009-05-14 00:58:52 -0400 | [diff] [blame] | 1269 | if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) | 
| Aneesh Kumar K.V | 4df3d26 | 2008-01-28 23:58:29 -0500 | [diff] [blame] | 1270 | return retval; | 
|  | 1271 |  | 
|  | 1272 | /* | 
| Mingming Cao | f5ab0d1 | 2008-02-25 15:29:55 -0500 | [diff] [blame] | 1273 | * Returns if the blocks have already allocated | 
|  | 1274 | * | 
|  | 1275 | * Note that if blocks have been preallocated | 
|  | 1276 | * ext4_ext_get_block() returns th create = 0 | 
|  | 1277 | * with buffer head unmapped. | 
|  | 1278 | */ | 
|  | 1279 | if (retval > 0 && buffer_mapped(bh)) | 
|  | 1280 | return retval; | 
|  | 1281 |  | 
|  | 1282 | /* | 
| Aneesh Kumar K.V | 2a8964d | 2009-05-14 17:05:39 -0400 | [diff] [blame] | 1283 | * When we call get_blocks without the create flag, the | 
|  | 1284 | * BH_Unwritten flag could have gotten set if the blocks | 
|  | 1285 | * requested were part of an uninitialized extent.  We need to | 
|  | 1286 | * clear this flag now that we are committed to converting all or | 
|  | 1287 | * part of the uninitialized extent into an initialized | 
|  | 1288 | * extent.  This is because we need to avoid the combination | 
|  | 1289 | * of BH_Unwritten and BH_Mapped flags being simultaneously | 
|  | 1290 | * set on the buffer_head. | 
|  | 1291 | */ | 
|  | 1292 | clear_buffer_unwritten(bh); | 
|  | 1293 |  | 
|  | 1294 | /* | 
| Mingming Cao | f5ab0d1 | 2008-02-25 15:29:55 -0500 | [diff] [blame] | 1295 | * Allocating new blocks and/or writing to an uninitialized extent | 
|  | 1296 | * will possibly result in updating i_data, so we take | 
|  | 1297 | * the write lock of i_data_sem, and call get_blocks() | 
|  | 1298 | * with create == 1 flag. | 
| Aneesh Kumar K.V | 4df3d26 | 2008-01-28 23:58:29 -0500 | [diff] [blame] | 1299 | */ | 
|  | 1300 | down_write((&EXT4_I(inode)->i_data_sem)); | 
| Mingming Cao | d2a1763 | 2008-07-14 17:52:37 -0400 | [diff] [blame] | 1301 |  | 
|  | 1302 | /* | 
|  | 1303 | * If the caller is from the delayed allocation writeout path, | 
|  | 1304 | * we have already reserved fs blocks for allocation, so | 
|  | 1305 | * let the underlying get_block() function know to | 
|  | 1306 | * avoid double accounting | 
|  | 1307 | */ | 
| Theodore Ts'o | c217705 | 2009-05-14 00:58:52 -0400 | [diff] [blame] | 1308 | if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) | 
| Mingming Cao | d2a1763 | 2008-07-14 17:52:37 -0400 | [diff] [blame] | 1309 | EXT4_I(inode)->i_delalloc_reserved_flag = 1; | 
| Aneesh Kumar K.V | 4df3d26 | 2008-01-28 23:58:29 -0500 | [diff] [blame] | 1310 | /* | 
|  | 1311 | * We need to check for EXT4 here because migrate | 
|  | 1312 | * could have changed the inode type in between | 
|  | 1313 | */ | 
| Aneesh Kumar K.V | 0e855ac | 2008-01-28 23:58:26 -0500 | [diff] [blame] | 1314 | if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { | 
|  | 1315 | retval =  ext4_ext_get_blocks(handle, inode, block, max_blocks, | 
| Theodore Ts'o | c217705 | 2009-05-14 00:58:52 -0400 | [diff] [blame] | 1316 | bh, flags); | 
| Aneesh Kumar K.V | 0e855ac | 2008-01-28 23:58:26 -0500 | [diff] [blame] | 1317 | } else { | 
| Theodore Ts'o | e4d996c | 2009-05-12 00:25:28 -0400 | [diff] [blame] | 1318 | retval = ext4_ind_get_blocks(handle, inode, block, | 
| Theodore Ts'o | c217705 | 2009-05-14 00:58:52 -0400 | [diff] [blame] | 1319 | max_blocks, bh, flags); | 
| Aneesh Kumar K.V | 267e4db | 2008-04-29 08:11:12 -0400 | [diff] [blame] | 1320 |  | 
|  | 1321 | if (retval > 0 && buffer_new(bh)) { | 
|  | 1322 | /* | 
|  | 1323 | * We allocated new blocks which will result in | 
|  | 1324 | * i_data's format changing.  Force the migrate | 
|  | 1325 | * to fail by clearing migrate flags | 
|  | 1326 | */ | 
| Theodore Ts'o | 19f5fb7 | 2010-01-24 14:34:07 -0500 | [diff] [blame] | 1327 | ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE); | 
| Aneesh Kumar K.V | 267e4db | 2008-04-29 08:11:12 -0400 | [diff] [blame] | 1328 | } | 
| Mingming Cao | d2a1763 | 2008-07-14 17:52:37 -0400 | [diff] [blame] | 1329 |  | 
| Aneesh Kumar K.V | 5f634d0 | 2010-01-25 04:00:31 -0500 | [diff] [blame] | 1330 | /* | 
|  | 1331 | * Update reserved blocks/metadata blocks after successful | 
|  | 1332 | * block allocation which had been deferred till now. We don't | 
|  | 1333 | * support fallocate for non-extent files, so we can update | 
|  | 1334 | * the reserved space here. | 
|  | 1335 | */ | 
|  | 1336 | if ((retval > 0) && | 
| Aneesh Kumar K.V | 1296cc8 | 2010-01-15 01:27:59 -0500 | [diff] [blame] | 1337 | (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)) | 
| Aneesh Kumar K.V | 5f634d0 | 2010-01-25 04:00:31 -0500 | [diff] [blame] | 1338 | ext4_da_update_reserve_space(inode, retval, 1); | 
|  | 1339 | } | 
| Theodore Ts'o | 2ac3b6e | 2009-05-14 13:57:08 -0400 | [diff] [blame] | 1340 | if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) | 
| Mingming Cao | d2a1763 | 2008-07-14 17:52:37 -0400 | [diff] [blame] | 1341 | EXT4_I(inode)->i_delalloc_reserved_flag = 0; | 
| Theodore Ts'o | 2ac3b6e | 2009-05-14 13:57:08 -0400 | [diff] [blame] | 1342 |  | 
| Aneesh Kumar K.V | 4df3d26 | 2008-01-28 23:58:29 -0500 | [diff] [blame] | 1343 | up_write((&EXT4_I(inode)->i_data_sem)); | 
| Theodore Ts'o | 6fd058f | 2009-05-17 15:38:01 -0400 | [diff] [blame] | 1344 | if (retval > 0 && buffer_mapped(bh)) { | 
| Theodore Ts'o | 80e4246 | 2009-09-08 08:21:26 -0400 | [diff] [blame] | 1345 | int ret = check_block_validity(inode, "file system " | 
|  | 1346 | "corruption after allocation", | 
|  | 1347 | block, bh->b_blocknr, retval); | 
| Theodore Ts'o | 6fd058f | 2009-05-17 15:38:01 -0400 | [diff] [blame] | 1348 | if (ret != 0) | 
|  | 1349 | return ret; | 
|  | 1350 | } | 
| Aneesh Kumar K.V | 0e855ac | 2008-01-28 23:58:26 -0500 | [diff] [blame] | 1351 | return retval; | 
|  | 1352 | } | 
|  | 1353 |  | 
| Mingming Cao | f3bd1f3 | 2008-08-19 22:16:03 -0400 | [diff] [blame] | 1354 | /* Maximum number of blocks we map for direct IO at once. */ | 
|  | 1355 | #define DIO_MAX_BLOCKS 4096 | 
|  | 1356 |  | 
| Eric Sandeen | 6873fa0 | 2008-10-07 00:46:36 -0400 | [diff] [blame] | 1357 | int ext4_get_block(struct inode *inode, sector_t iblock, | 
|  | 1358 | struct buffer_head *bh_result, int create) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1359 | { | 
| Dmitriy Monakhov | 3e4fdaf | 2007-02-10 01:46:35 -0800 | [diff] [blame] | 1360 | handle_t *handle = ext4_journal_current_handle(); | 
| Jan Kara | 7fb5409 | 2008-02-10 01:08:38 -0500 | [diff] [blame] | 1361 | int ret = 0, started = 0; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1362 | unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; | 
| Mingming Cao | f3bd1f3 | 2008-08-19 22:16:03 -0400 | [diff] [blame] | 1363 | int dio_credits; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1364 |  | 
| Jan Kara | 7fb5409 | 2008-02-10 01:08:38 -0500 | [diff] [blame] | 1365 | if (create && !handle) { | 
|  | 1366 | /* Direct IO write... */ | 
|  | 1367 | if (max_blocks > DIO_MAX_BLOCKS) | 
|  | 1368 | max_blocks = DIO_MAX_BLOCKS; | 
| Mingming Cao | f3bd1f3 | 2008-08-19 22:16:03 -0400 | [diff] [blame] | 1369 | dio_credits = ext4_chunk_trans_blocks(inode, max_blocks); | 
|  | 1370 | handle = ext4_journal_start(inode, dio_credits); | 
| Jan Kara | 7fb5409 | 2008-02-10 01:08:38 -0500 | [diff] [blame] | 1371 | if (IS_ERR(handle)) { | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1372 | ret = PTR_ERR(handle); | 
| Jan Kara | 7fb5409 | 2008-02-10 01:08:38 -0500 | [diff] [blame] | 1373 | goto out; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1374 | } | 
| Jan Kara | 7fb5409 | 2008-02-10 01:08:38 -0500 | [diff] [blame] | 1375 | started = 1; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1376 | } | 
|  | 1377 |  | 
| Theodore Ts'o | 12b7ac1 | 2009-05-14 00:57:44 -0400 | [diff] [blame] | 1378 | ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result, | 
| Theodore Ts'o | c217705 | 2009-05-14 00:58:52 -0400 | [diff] [blame] | 1379 | create ? EXT4_GET_BLOCKS_CREATE : 0); | 
| Jan Kara | 7fb5409 | 2008-02-10 01:08:38 -0500 | [diff] [blame] | 1380 | if (ret > 0) { | 
|  | 1381 | bh_result->b_size = (ret << inode->i_blkbits); | 
|  | 1382 | ret = 0; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1383 | } | 
| Jan Kara | 7fb5409 | 2008-02-10 01:08:38 -0500 | [diff] [blame] | 1384 | if (started) | 
|  | 1385 | ext4_journal_stop(handle); | 
|  | 1386 | out: | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1387 | return ret; | 
|  | 1388 | } | 
|  | 1389 |  | 
|  | 1390 | /* | 
|  | 1391 | * `handle' can be NULL if create is zero | 
|  | 1392 | */ | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 1393 | struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode, | 
| Aneesh Kumar K.V | 725d26d | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 1394 | ext4_lblk_t block, int create, int *errp) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1395 | { | 
|  | 1396 | struct buffer_head dummy; | 
|  | 1397 | int fatal = 0, err; | 
| Jan Kara | 03f5d8b | 2009-06-09 00:17:05 -0400 | [diff] [blame] | 1398 | int flags = 0; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1399 |  | 
|  | 1400 | J_ASSERT(handle != NULL || create == 0); | 
|  | 1401 |  | 
|  | 1402 | dummy.b_state = 0; | 
|  | 1403 | dummy.b_blocknr = -1000; | 
|  | 1404 | buffer_trace_init(&dummy.b_history); | 
| Theodore Ts'o | c217705 | 2009-05-14 00:58:52 -0400 | [diff] [blame] | 1405 | if (create) | 
|  | 1406 | flags |= EXT4_GET_BLOCKS_CREATE; | 
|  | 1407 | err = ext4_get_blocks(handle, inode, block, 1, &dummy, flags); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1408 | /* | 
| Theodore Ts'o | c217705 | 2009-05-14 00:58:52 -0400 | [diff] [blame] | 1409 | * ext4_get_blocks() returns the number of blocks mapped, or 0 in | 
|  | 1410 | * the case of a hole. | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1411 | */ | 
|  | 1412 | if (err > 0) { | 
|  | 1413 | if (err > 1) | 
|  | 1414 | WARN_ON(1); | 
|  | 1415 | err = 0; | 
|  | 1416 | } | 
|  | 1417 | *errp = err; | 
|  | 1418 | if (!err && buffer_mapped(&dummy)) { | 
|  | 1419 | struct buffer_head *bh; | 
|  | 1420 | bh = sb_getblk(inode->i_sb, dummy.b_blocknr); | 
|  | 1421 | if (!bh) { | 
|  | 1422 | *errp = -EIO; | 
|  | 1423 | goto err; | 
|  | 1424 | } | 
|  | 1425 | if (buffer_new(&dummy)) { | 
|  | 1426 | J_ASSERT(create != 0); | 
| Aneesh Kumar K.V | ac39849 | 2007-10-16 18:38:25 -0400 | [diff] [blame] | 1427 | J_ASSERT(handle != NULL); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1428 |  | 
|  | 1429 | /* | 
|  | 1430 | * Now that we do not always journal data, we should | 
|  | 1431 | * keep in mind whether this should always journal the | 
|  | 1432 | * new buffer as metadata.  For now, regular file | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 1433 | * writes use ext4_get_block instead, so it's not a | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1434 | * problem. | 
|  | 1435 | */ | 
|  | 1436 | lock_buffer(bh); | 
|  | 1437 | BUFFER_TRACE(bh, "call get_create_access"); | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 1438 | fatal = ext4_journal_get_create_access(handle, bh); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1439 | if (!fatal && !buffer_uptodate(bh)) { | 
| Theodore Ts'o | af5bc92 | 2008-09-08 22:25:24 -0400 | [diff] [blame] | 1440 | memset(bh->b_data, 0, inode->i_sb->s_blocksize); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1441 | set_buffer_uptodate(bh); | 
|  | 1442 | } | 
|  | 1443 | unlock_buffer(bh); | 
| Frank Mayhar | 0390131 | 2009-01-07 00:06:22 -0500 | [diff] [blame] | 1444 | BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); | 
|  | 1445 | err = ext4_handle_dirty_metadata(handle, inode, bh); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1446 | if (!fatal) | 
|  | 1447 | fatal = err; | 
|  | 1448 | } else { | 
|  | 1449 | BUFFER_TRACE(bh, "not a new buffer"); | 
|  | 1450 | } | 
|  | 1451 | if (fatal) { | 
|  | 1452 | *errp = fatal; | 
|  | 1453 | brelse(bh); | 
|  | 1454 | bh = NULL; | 
|  | 1455 | } | 
|  | 1456 | return bh; | 
|  | 1457 | } | 
|  | 1458 | err: | 
|  | 1459 | return NULL; | 
|  | 1460 | } | 
|  | 1461 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 1462 | struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode, | 
| Aneesh Kumar K.V | 725d26d | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 1463 | ext4_lblk_t block, int create, int *err) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1464 | { | 
| Theodore Ts'o | af5bc92 | 2008-09-08 22:25:24 -0400 | [diff] [blame] | 1465 | struct buffer_head *bh; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1466 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 1467 | bh = ext4_getblk(handle, inode, block, create, err); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1468 | if (!bh) | 
|  | 1469 | return bh; | 
|  | 1470 | if (buffer_uptodate(bh)) | 
|  | 1471 | return bh; | 
|  | 1472 | ll_rw_block(READ_META, 1, &bh); | 
|  | 1473 | wait_on_buffer(bh); | 
|  | 1474 | if (buffer_uptodate(bh)) | 
|  | 1475 | return bh; | 
|  | 1476 | put_bh(bh); | 
|  | 1477 | *err = -EIO; | 
|  | 1478 | return NULL; | 
|  | 1479 | } | 
|  | 1480 |  | 
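|  |  | /* | 
|  |  | * Apply @fn to each buffer of the page headed by @head that overlaps the | 
|  |  | * byte range [@from, @to).  Buffers wholly outside that range are skipped, | 
|  |  | * and *@partial is set if any skipped buffer is not uptodate.  The walk | 
|  |  | * stops at the first buffer for which @fn returns an error, and that | 
|  |  | * error is returned. | 
|  |  | */ | 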
| Theodore Ts'o | af5bc92 | 2008-09-08 22:25:24 -0400 | [diff] [blame] | 1481 | static int walk_page_buffers(handle_t *handle, | 
|  | 1482 | struct buffer_head *head, | 
|  | 1483 | unsigned from, | 
|  | 1484 | unsigned to, | 
|  | 1485 | int *partial, | 
|  | 1486 | int (*fn)(handle_t *handle, | 
|  | 1487 | struct buffer_head *bh)) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1488 | { | 
|  | 1489 | struct buffer_head *bh; | 
|  | 1490 | unsigned block_start, block_end; | 
|  | 1491 | unsigned blocksize = head->b_size; | 
|  | 1492 | int err, ret = 0; | 
|  | 1493 | struct buffer_head *next; | 
|  | 1494 |  | 
| Theodore Ts'o | af5bc92 | 2008-09-08 22:25:24 -0400 | [diff] [blame] | 1495 | for (bh = head, block_start = 0; | 
|  | 1496 | ret == 0 && (bh != head || !block_start); | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 1497 | block_start = block_end, bh = next) { | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1498 | next = bh->b_this_page; | 
|  | 1499 | block_end = block_start + blocksize; | 
|  | 1500 | if (block_end <= from || block_start >= to) { | 
|  | 1501 | if (partial && !buffer_uptodate(bh)) | 
|  | 1502 | *partial = 1; | 
|  | 1503 | continue; | 
|  | 1504 | } | 
|  | 1505 | err = (*fn)(handle, bh); | 
|  | 1506 | if (!ret) | 
|  | 1507 | ret = err; | 
|  | 1508 | } | 
|  | 1509 | return ret; | 
|  | 1510 | } | 
|  | 1511 |  | 
|  | 1512 | /* | 
|  | 1513 | * To preserve ordering, it is essential that the hole instantiation and | 
|  | 1514 | * the data write be encapsulated in a single transaction.  We cannot | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 1515 | * close off a transaction and start a new one between the ext4_get_block() | 
| Mingming Cao | dab291a | 2006-10-11 01:21:01 -0700 | [diff] [blame] | 1516 | * and the commit_write().  So doing the jbd2_journal_start at the start of | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1517 | * prepare_write() is the right place. | 
|  | 1518 | * | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 1519 | * Also, this function can nest inside ext4_writepage() -> | 
|  | 1520 | * block_write_full_page(). In that case, we *know* that ext4_writepage() | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1521 | * has generated enough buffer credits to do the whole page.  So we won't | 
|  | 1522 | * block on the journal in that case, which is good, because the caller may | 
|  | 1523 | * be PF_MEMALLOC. | 
|  | 1524 | * | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 1525 | * By accident, ext4 can be reentered when a transaction is open via | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1526 | * quota file writes.  If we were to commit the transaction while thus | 
|  | 1527 | * reentered, there can be a deadlock - we would be holding a quota | 
|  | 1528 | * lock, and the commit would never complete if another thread had a | 
|  | 1529 | * transaction open and was blocking on the quota lock - a ranking | 
|  | 1530 | * violation. | 
|  | 1531 | * | 
| Mingming Cao | dab291a | 2006-10-11 01:21:01 -0700 | [diff] [blame] | 1532 | * So what we do is to rely on the fact that jbd2_journal_stop/journal_start | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1533 | * will _not_ run commit under these circumstances because handle->h_ref | 
|  | 1534 | * is elevated.  We'll still have enough credits for the tiny quotafile | 
|  | 1535 | * write. | 
|  | 1536 | */ | 
|  | 1537 | static int do_journal_get_write_access(handle_t *handle, | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 1538 | struct buffer_head *bh) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1539 | { | 
|  | 1540 | if (!buffer_mapped(bh) || buffer_freed(bh)) | 
|  | 1541 | return 0; | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 1542 | return ext4_journal_get_write_access(handle, bh); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1543 | } | 
|  | 1544 |  | 
| Jan Kara | b9a4207 | 2009-12-08 21:24:33 -0500 | [diff] [blame] | 1545 | /* | 
|  | 1546 | * Truncate blocks that were not used by write. We have to truncate the | 
|  | 1547 | * pagecache as well so that corresponding buffers get properly unmapped. | 
|  | 1548 | */ | 
|  | 1549 | static void ext4_truncate_failed_write(struct inode *inode) | 
|  | 1550 | { | 
|  | 1551 | truncate_inode_pages(inode->i_mapping, inode->i_size); | 
|  | 1552 | ext4_truncate(inode); | 
|  | 1553 | } | 
|  | 1554 |  | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 1555 | static int ext4_get_block_write(struct inode *inode, sector_t iblock, | 
|  | 1556 | struct buffer_head *bh_result, int create); | 
| Nick Piggin | bfc1af6 | 2007-10-16 01:25:05 -0700 | [diff] [blame] | 1557 | static int ext4_write_begin(struct file *file, struct address_space *mapping, | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 1558 | loff_t pos, unsigned len, unsigned flags, | 
|  | 1559 | struct page **pagep, void **fsdata) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1560 | { | 
| Theodore Ts'o | af5bc92 | 2008-09-08 22:25:24 -0400 | [diff] [blame] | 1561 | struct inode *inode = mapping->host; | 
| Aneesh Kumar K.V | 1938a15 | 2009-06-05 01:00:26 -0400 | [diff] [blame] | 1562 | int ret, needed_blocks; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1563 | handle_t *handle; | 
|  | 1564 | int retries = 0; | 
| Theodore Ts'o | af5bc92 | 2008-09-08 22:25:24 -0400 | [diff] [blame] | 1565 | struct page *page; | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 1566 | pgoff_t index; | 
| Theodore Ts'o | af5bc92 | 2008-09-08 22:25:24 -0400 | [diff] [blame] | 1567 | unsigned from, to; | 
| Nick Piggin | bfc1af6 | 2007-10-16 01:25:05 -0700 | [diff] [blame] | 1568 |  | 
| Theodore Ts'o | 9bffad1 | 2009-06-17 11:48:11 -0400 | [diff] [blame] | 1569 | trace_ext4_write_begin(inode, pos, len, flags); | 
| Aneesh Kumar K.V | 1938a15 | 2009-06-05 01:00:26 -0400 | [diff] [blame] | 1570 | /* | 
|  | 1571 | * Reserve one extra block for addition to the orphan list in case | 
|  | 1572 | * we allocate blocks but the write fails for some reason | 
|  | 1573 | */ | 
|  | 1574 | needed_blocks = ext4_writepage_trans_blocks(inode) + 1; | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 1575 | index = pos >> PAGE_CACHE_SHIFT; | 
| Theodore Ts'o | af5bc92 | 2008-09-08 22:25:24 -0400 | [diff] [blame] | 1576 | from = pos & (PAGE_CACHE_SIZE - 1); | 
|  | 1577 | to = from + len; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1578 |  | 
|  | 1579 | retry: | 
| Theodore Ts'o | af5bc92 | 2008-09-08 22:25:24 -0400 | [diff] [blame] | 1580 | handle = ext4_journal_start(inode, needed_blocks); | 
|  | 1581 | if (IS_ERR(handle)) { | 
|  | 1582 | ret = PTR_ERR(handle); | 
|  | 1583 | goto out; | 
| Nick Piggin | bfc1af6 | 2007-10-16 01:25:05 -0700 | [diff] [blame] | 1584 | } | 
|  | 1585 |  | 
| Jan Kara | ebd3610 | 2009-02-22 21:09:59 -0500 | [diff] [blame] | 1586 | /* We cannot recurse into the filesystem as the transaction is already | 
|  | 1587 | * started */ | 
|  | 1588 | flags |= AOP_FLAG_NOFS; | 
|  | 1589 |  | 
| Nick Piggin | 54566b2 | 2009-01-04 12:00:53 -0800 | [diff] [blame] | 1590 | page = grab_cache_page_write_begin(mapping, index, flags); | 
| Jan Kara | cf108bc | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 1591 | if (!page) { | 
|  | 1592 | ext4_journal_stop(handle); | 
|  | 1593 | ret = -ENOMEM; | 
|  | 1594 | goto out; | 
|  | 1595 | } | 
|  | 1596 | *pagep = page; | 
|  | 1597 |  | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 1598 | if (ext4_should_dioread_nolock(inode)) | 
|  | 1599 | ret = block_write_begin(file, mapping, pos, len, flags, pagep, | 
|  | 1600 | fsdata, ext4_get_block_write); | 
|  | 1601 | else | 
|  | 1602 | ret = block_write_begin(file, mapping, pos, len, flags, pagep, | 
|  | 1603 | fsdata, ext4_get_block); | 
| Nick Piggin | bfc1af6 | 2007-10-16 01:25:05 -0700 | [diff] [blame] | 1604 |  | 
|  | 1605 | if (!ret && ext4_should_journal_data(inode)) { | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1606 | ret = walk_page_buffers(handle, page_buffers(page), | 
|  | 1607 | from, to, NULL, do_journal_get_write_access); | 
|  | 1608 | } | 
| Nick Piggin | bfc1af6 | 2007-10-16 01:25:05 -0700 | [diff] [blame] | 1609 |  | 
|  | 1610 | if (ret) { | 
| Theodore Ts'o | af5bc92 | 2008-09-08 22:25:24 -0400 | [diff] [blame] | 1611 | unlock_page(page); | 
| Theodore Ts'o | af5bc92 | 2008-09-08 22:25:24 -0400 | [diff] [blame] | 1612 | page_cache_release(page); | 
| Aneesh Kumar K.V | ae4d537 | 2008-09-13 13:10:25 -0400 | [diff] [blame] | 1613 | /* | 
|  | 1614 | * block_write_begin may have instantiated a few blocks | 
|  | 1615 | * outside i_size.  Trim these off again. Don't need | 
|  | 1616 | * i_size_read because we hold i_mutex. | 
| Aneesh Kumar K.V | 1938a15 | 2009-06-05 01:00:26 -0400 | [diff] [blame] | 1617 | * | 
|  | 1618 | * Add inode to orphan list in case we crash before | 
|  | 1619 | * truncate finishes | 
| Aneesh Kumar K.V | ae4d537 | 2008-09-13 13:10:25 -0400 | [diff] [blame] | 1620 | */ | 
| Jan Kara | ffacfa7 | 2009-07-13 16:22:22 -0400 | [diff] [blame] | 1621 | if (pos + len > inode->i_size && ext4_can_truncate(inode)) | 
| Aneesh Kumar K.V | 1938a15 | 2009-06-05 01:00:26 -0400 | [diff] [blame] | 1622 | ext4_orphan_add(handle, inode); | 
|  | 1623 |  | 
|  | 1624 | ext4_journal_stop(handle); | 
|  | 1625 | if (pos + len > inode->i_size) { | 
| Jan Kara | b9a4207 | 2009-12-08 21:24:33 -0500 | [diff] [blame] | 1626 | ext4_truncate_failed_write(inode); | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 1627 | /* | 
| Jan Kara | ffacfa7 | 2009-07-13 16:22:22 -0400 | [diff] [blame] | 1628 | * If truncate failed early the inode might | 
| Aneesh Kumar K.V | 1938a15 | 2009-06-05 01:00:26 -0400 | [diff] [blame] | 1629 | * still be on the orphan list; we need to | 
|  | 1630 | * make sure the inode is removed from the | 
|  | 1631 | * orphan list in that case. | 
|  | 1632 | */ | 
|  | 1633 | if (inode->i_nlink) | 
|  | 1634 | ext4_orphan_del(NULL, inode); | 
|  | 1635 | } | 
| Nick Piggin | bfc1af6 | 2007-10-16 01:25:05 -0700 | [diff] [blame] | 1636 | } | 
|  | 1637 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 1638 | if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1639 | goto retry; | 
| Andrew Morton | 7479d2b | 2007-04-01 23:49:44 -0700 | [diff] [blame] | 1640 | out: | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1641 | return ret; | 
|  | 1642 | } | 
|  | 1643 |  | 
| Nick Piggin | bfc1af6 | 2007-10-16 01:25:05 -0700 | [diff] [blame] | 1644 | /* For write_end() in data=journal mode */ | 
|  | 1645 | static int write_end_fn(handle_t *handle, struct buffer_head *bh) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1646 | { | 
|  | 1647 | if (!buffer_mapped(bh) || buffer_freed(bh)) | 
|  | 1648 | return 0; | 
|  | 1649 | set_buffer_uptodate(bh); | 
| Frank Mayhar | 0390131 | 2009-01-07 00:06:22 -0500 | [diff] [blame] | 1650 | return ext4_handle_dirty_metadata(handle, NULL, bh); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1651 | } | 
|  | 1652 |  | 
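|  |  | /* | 
|  |  | * Common helper for the write_end paths: call block_write_end() to commit | 
|  |  | * the copied data, then update i_size and i_disksize if the write extended | 
|  |  | * the file.  The inode is deliberately marked dirty only after the page | 
|  |  | * lock has been dropped; see the comment near the end of the function for | 
|  |  | * the lock-ordering reasons. | 
|  |  | */ | 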
| Aneesh Kumar K.V | f851408 | 2009-06-05 00:56:49 -0400 | [diff] [blame] | 1653 | static int ext4_generic_write_end(struct file *file, | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 1654 | struct address_space *mapping, | 
|  | 1655 | loff_t pos, unsigned len, unsigned copied, | 
|  | 1656 | struct page *page, void *fsdata) | 
| Aneesh Kumar K.V | f851408 | 2009-06-05 00:56:49 -0400 | [diff] [blame] | 1657 | { | 
|  | 1658 | int i_size_changed = 0; | 
|  | 1659 | struct inode *inode = mapping->host; | 
|  | 1660 | handle_t *handle = ext4_journal_current_handle(); | 
|  | 1661 |  | 
|  | 1662 | copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); | 
|  | 1663 |  | 
|  | 1664 | /* | 
|  | 1665 | * No need to use i_size_read() here, the i_size | 
|  | 1666 | * cannot change under us because we hold i_mutex. | 
|  | 1667 | * | 
|  | 1668 | * But it's important to update i_size while still holding page lock: | 
|  | 1669 | * page writeout could otherwise come in and zero beyond i_size. | 
|  | 1670 | */ | 
|  | 1671 | if (pos + copied > inode->i_size) { | 
|  | 1672 | i_size_write(inode, pos + copied); | 
|  | 1673 | i_size_changed = 1; | 
|  | 1674 | } | 
|  | 1675 |  | 
|  | 1676 | if (pos + copied >  EXT4_I(inode)->i_disksize) { | 
|  | 1677 | /* We need to mark the inode dirty even if | 
|  | 1678 | * new_i_size is less than inode->i_size | 
|  | 1679 | * but greater than i_disksize. (hint: delalloc) | 
|  | 1680 | */ | 
|  | 1681 | ext4_update_i_disksize(inode, (pos + copied)); | 
|  | 1682 | i_size_changed = 1; | 
|  | 1683 | } | 
|  | 1684 | unlock_page(page); | 
|  | 1685 | page_cache_release(page); | 
|  | 1686 |  | 
|  | 1687 | /* | 
|  | 1688 | * Don't mark the inode dirty under page lock. First, it unnecessarily | 
|  | 1689 | * makes the holding time of page lock longer. Second, it forces lock | 
|  | 1690 | * ordering of page lock and transaction start for journaling | 
|  | 1691 | * filesystems. | 
|  | 1692 | */ | 
|  | 1693 | if (i_size_changed) | 
|  | 1694 | ext4_mark_inode_dirty(handle, inode); | 
|  | 1695 |  | 
|  | 1696 | return copied; | 
|  | 1697 | } | 
|  | 1698 |  | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1699 | /* | 
|  | 1700 | * We need to pick up the new inode size which generic_commit_write gave us. | 
|  | 1701 | * `file' can be NULL - e.g., when called from page_symlink(). | 
|  | 1702 | * | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 1703 | * ext4 never places buffers on inode->i_mapping->private_list.  metadata | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1704 | * buffers are managed internally. | 
|  | 1705 | */ | 
| Nick Piggin | bfc1af6 | 2007-10-16 01:25:05 -0700 | [diff] [blame] | 1706 | static int ext4_ordered_write_end(struct file *file, | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 1707 | struct address_space *mapping, | 
|  | 1708 | loff_t pos, unsigned len, unsigned copied, | 
|  | 1709 | struct page *page, void *fsdata) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1710 | { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 1711 | handle_t *handle = ext4_journal_current_handle(); | 
| Jan Kara | cf108bc | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 1712 | struct inode *inode = mapping->host; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1713 | int ret = 0, ret2; | 
|  | 1714 |  | 
| Theodore Ts'o | 9bffad1 | 2009-06-17 11:48:11 -0400 | [diff] [blame] | 1715 | trace_ext4_ordered_write_end(inode, pos, len, copied); | 
| Jan Kara | 678aaf4 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 1716 | ret = ext4_jbd2_file_inode(handle, inode); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1717 |  | 
|  | 1718 | if (ret == 0) { | 
| Aneesh Kumar K.V | f851408 | 2009-06-05 00:56:49 -0400 | [diff] [blame] | 1719 | ret2 = ext4_generic_write_end(file, mapping, pos, len, copied, | 
| Nick Piggin | bfc1af6 | 2007-10-16 01:25:05 -0700 | [diff] [blame] | 1720 | page, fsdata); | 
| Roel Kluin | f8a87d8 | 2008-04-29 22:01:18 -0400 | [diff] [blame] | 1721 | copied = ret2; | 
| Jan Kara | ffacfa7 | 2009-07-13 16:22:22 -0400 | [diff] [blame] | 1722 | if (pos + len > inode->i_size && ext4_can_truncate(inode)) | 
| Aneesh Kumar K.V | f851408 | 2009-06-05 00:56:49 -0400 | [diff] [blame] | 1723 | /* If we have allocated more blocks than we | 
|  | 1724 | * have copied, we will have blocks allocated | 
|  | 1725 | * outside inode->i_size, so truncate them. | 
|  | 1726 | */ | 
|  | 1727 | ext4_orphan_add(handle, inode); | 
| Roel Kluin | f8a87d8 | 2008-04-29 22:01:18 -0400 | [diff] [blame] | 1728 | if (ret2 < 0) | 
|  | 1729 | ret = ret2; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1730 | } | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 1731 | ret2 = ext4_journal_stop(handle); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1732 | if (!ret) | 
|  | 1733 | ret = ret2; | 
| Nick Piggin | bfc1af6 | 2007-10-16 01:25:05 -0700 | [diff] [blame] | 1734 |  | 
| Aneesh Kumar K.V | f851408 | 2009-06-05 00:56:49 -0400 | [diff] [blame] | 1735 | if (pos + len > inode->i_size) { | 
| Jan Kara | b9a4207 | 2009-12-08 21:24:33 -0500 | [diff] [blame] | 1736 | ext4_truncate_failed_write(inode); | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 1737 | /* | 
| Jan Kara | ffacfa7 | 2009-07-13 16:22:22 -0400 | [diff] [blame] | 1738 | * If truncate failed early the inode might still be | 
| Aneesh Kumar K.V | f851408 | 2009-06-05 00:56:49 -0400 | [diff] [blame] | 1739 | * on the orphan list; we need to make sure the inode | 
|  | 1740 | * is removed from the orphan list in that case. | 
|  | 1741 | */ | 
|  | 1742 | if (inode->i_nlink) | 
|  | 1743 | ext4_orphan_del(NULL, inode); | 
|  | 1744 | } | 
|  | 1745 |  | 
|  | 1746 |  | 
| Nick Piggin | bfc1af6 | 2007-10-16 01:25:05 -0700 | [diff] [blame] | 1747 | return ret ? ret : copied; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1748 | } | 
|  | 1749 |  | 
| Nick Piggin | bfc1af6 | 2007-10-16 01:25:05 -0700 | [diff] [blame] | 1750 | static int ext4_writeback_write_end(struct file *file, | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 1751 | struct address_space *mapping, | 
|  | 1752 | loff_t pos, unsigned len, unsigned copied, | 
|  | 1753 | struct page *page, void *fsdata) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1754 | { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 1755 | handle_t *handle = ext4_journal_current_handle(); | 
| Jan Kara | cf108bc | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 1756 | struct inode *inode = mapping->host; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1757 | int ret = 0, ret2; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1758 |  | 
| Theodore Ts'o | 9bffad1 | 2009-06-17 11:48:11 -0400 | [diff] [blame] | 1759 | trace_ext4_writeback_write_end(inode, pos, len, copied); | 
| Aneesh Kumar K.V | f851408 | 2009-06-05 00:56:49 -0400 | [diff] [blame] | 1760 | ret2 = ext4_generic_write_end(file, mapping, pos, len, copied, | 
| Nick Piggin | bfc1af6 | 2007-10-16 01:25:05 -0700 | [diff] [blame] | 1761 | page, fsdata); | 
| Roel Kluin | f8a87d8 | 2008-04-29 22:01:18 -0400 | [diff] [blame] | 1762 | copied = ret2; | 
| Jan Kara | ffacfa7 | 2009-07-13 16:22:22 -0400 | [diff] [blame] | 1763 | if (pos + len > inode->i_size && ext4_can_truncate(inode)) | 
| Aneesh Kumar K.V | f851408 | 2009-06-05 00:56:49 -0400 | [diff] [blame] | 1764 | /* If we have allocated more blocks than we | 
|  | 1765 | * have copied, we will have blocks allocated | 
|  | 1766 | * outside inode->i_size, so truncate them. | 
|  | 1767 | */ | 
|  | 1768 | ext4_orphan_add(handle, inode); | 
|  | 1769 |  | 
| Roel Kluin | f8a87d8 | 2008-04-29 22:01:18 -0400 | [diff] [blame] | 1770 | if (ret2 < 0) | 
|  | 1771 | ret = ret2; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1772 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 1773 | ret2 = ext4_journal_stop(handle); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1774 | if (!ret) | 
|  | 1775 | ret = ret2; | 
| Nick Piggin | bfc1af6 | 2007-10-16 01:25:05 -0700 | [diff] [blame] | 1776 |  | 
| Aneesh Kumar K.V | f851408 | 2009-06-05 00:56:49 -0400 | [diff] [blame] | 1777 | if (pos + len > inode->i_size) { | 
| Jan Kara | b9a4207 | 2009-12-08 21:24:33 -0500 | [diff] [blame] | 1778 | ext4_truncate_failed_write(inode); | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 1779 | /* | 
| Jan Kara | ffacfa7 | 2009-07-13 16:22:22 -0400 | [diff] [blame] | 1780 | * If truncate failed early the inode might still be | 
| Aneesh Kumar K.V | f851408 | 2009-06-05 00:56:49 -0400 | [diff] [blame] | 1781 | * on the orphan list; we need to make sure the inode | 
|  | 1782 | * is removed from the orphan list in that case. | 
|  | 1783 | */ | 
|  | 1784 | if (inode->i_nlink) | 
|  | 1785 | ext4_orphan_del(NULL, inode); | 
|  | 1786 | } | 
|  | 1787 |  | 
| Nick Piggin | bfc1af6 | 2007-10-16 01:25:05 -0700 | [diff] [blame] | 1788 | return ret ? ret : copied; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1789 | } | 
|  | 1790 |  | 
| Nick Piggin | bfc1af6 | 2007-10-16 01:25:05 -0700 | [diff] [blame] | 1791 | static int ext4_journalled_write_end(struct file *file, | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 1792 | struct address_space *mapping, | 
|  | 1793 | loff_t pos, unsigned len, unsigned copied, | 
|  | 1794 | struct page *page, void *fsdata) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1795 | { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 1796 | handle_t *handle = ext4_journal_current_handle(); | 
| Nick Piggin | bfc1af6 | 2007-10-16 01:25:05 -0700 | [diff] [blame] | 1797 | struct inode *inode = mapping->host; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1798 | int ret = 0, ret2; | 
|  | 1799 | int partial = 0; | 
| Nick Piggin | bfc1af6 | 2007-10-16 01:25:05 -0700 | [diff] [blame] | 1800 | unsigned from, to; | 
| Aneesh Kumar K.V | cf17fea | 2008-09-13 13:06:18 -0400 | [diff] [blame] | 1801 | loff_t new_i_size; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1802 |  | 
| Theodore Ts'o | 9bffad1 | 2009-06-17 11:48:11 -0400 | [diff] [blame] | 1803 | trace_ext4_journalled_write_end(inode, pos, len, copied); | 
| Nick Piggin | bfc1af6 | 2007-10-16 01:25:05 -0700 | [diff] [blame] | 1804 | from = pos & (PAGE_CACHE_SIZE - 1); | 
|  | 1805 | to = from + len; | 
|  | 1806 |  | 
|  | 1807 | if (copied < len) { | 
|  | 1808 | if (!PageUptodate(page)) | 
|  | 1809 | copied = 0; | 
|  | 1810 | page_zero_new_buffers(page, from+copied, to); | 
|  | 1811 | } | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1812 |  | 
|  | 1813 | ret = walk_page_buffers(handle, page_buffers(page), from, | 
| Nick Piggin | bfc1af6 | 2007-10-16 01:25:05 -0700 | [diff] [blame] | 1814 | to, &partial, write_end_fn); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1815 | if (!partial) | 
|  | 1816 | SetPageUptodate(page); | 
| Aneesh Kumar K.V | cf17fea | 2008-09-13 13:06:18 -0400 | [diff] [blame] | 1817 | new_i_size = pos + copied; | 
|  | 1818 | if (new_i_size > inode->i_size) | 
| Nick Piggin | bfc1af6 | 2007-10-16 01:25:05 -0700 | [diff] [blame] | 1819 | i_size_write(inode, pos+copied); | 
| Theodore Ts'o | 19f5fb7 | 2010-01-24 14:34:07 -0500 | [diff] [blame] | 1820 | ext4_set_inode_state(inode, EXT4_STATE_JDATA); | 
| Aneesh Kumar K.V | cf17fea | 2008-09-13 13:06:18 -0400 | [diff] [blame] | 1821 | if (new_i_size > EXT4_I(inode)->i_disksize) { | 
|  | 1822 | ext4_update_i_disksize(inode, new_i_size); | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 1823 | ret2 = ext4_mark_inode_dirty(handle, inode); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1824 | if (!ret) | 
|  | 1825 | ret = ret2; | 
|  | 1826 | } | 
| Nick Piggin | bfc1af6 | 2007-10-16 01:25:05 -0700 | [diff] [blame] | 1827 |  | 
| Jan Kara | cf108bc | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 1828 | unlock_page(page); | 
| Aneesh Kumar K.V | f851408 | 2009-06-05 00:56:49 -0400 | [diff] [blame] | 1829 | page_cache_release(page); | 
| Jan Kara | ffacfa7 | 2009-07-13 16:22:22 -0400 | [diff] [blame] | 1830 | if (pos + len > inode->i_size && ext4_can_truncate(inode)) | 
| Aneesh Kumar K.V | f851408 | 2009-06-05 00:56:49 -0400 | [diff] [blame] | 1831 | /* If we have allocated more blocks than we | 
|  | 1832 | * have copied, we will have blocks allocated | 
|  | 1833 | * outside inode->i_size, so truncate them. | 
|  | 1834 | */ | 
|  | 1835 | ext4_orphan_add(handle, inode); | 
|  | 1836 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 1837 | ret2 = ext4_journal_stop(handle); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1838 | if (!ret) | 
|  | 1839 | ret = ret2; | 
| Aneesh Kumar K.V | f851408 | 2009-06-05 00:56:49 -0400 | [diff] [blame] | 1840 | if (pos + len > inode->i_size) { | 
| Jan Kara | b9a4207 | 2009-12-08 21:24:33 -0500 | [diff] [blame] | 1841 | ext4_truncate_failed_write(inode); | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 1842 | /* | 
| Jan Kara | ffacfa7 | 2009-07-13 16:22:22 -0400 | [diff] [blame] | 1843 | * If truncate failed early the inode might still be | 
| Aneesh Kumar K.V | f851408 | 2009-06-05 00:56:49 -0400 | [diff] [blame] | 1844 | * on the orphan list; we need to make sure the inode | 
|  | 1845 | * is removed from the orphan list in that case. | 
|  | 1846 | */ | 
|  | 1847 | if (inode->i_nlink) | 
|  | 1848 | ext4_orphan_del(NULL, inode); | 
|  | 1849 | } | 
| Nick Piggin | bfc1af6 | 2007-10-16 01:25:05 -0700 | [diff] [blame] | 1850 |  | 
|  | 1851 | return ret ? ret : copied; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1852 | } | 
| Mingming Cao | d2a1763 | 2008-07-14 17:52:37 -0400 | [diff] [blame] | 1853 |  | 
| Theodore Ts'o | 9d0be50 | 2010-01-01 02:41:30 -0500 | [diff] [blame] | 1854 | /* | 
|  | 1855 | * Reserve a single block located at lblock | 
|  | 1856 | */ | 
|  | 1857 | static int ext4_da_reserve_space(struct inode *inode, sector_t lblock) | 
| Mingming Cao | d2a1763 | 2008-07-14 17:52:37 -0400 | [diff] [blame] | 1858 | { | 
| Aneesh Kumar K.V | 030ba6b | 2008-09-08 23:14:50 -0400 | [diff] [blame] | 1859 | int retries = 0; | 
| Mingming Cao | 60e58e0 | 2009-01-22 18:13:05 +0100 | [diff] [blame] | 1860 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 
| Theodore Ts'o | 0637c6f | 2009-12-30 14:20:45 -0500 | [diff] [blame] | 1861 | struct ext4_inode_info *ei = EXT4_I(inode); | 
| Theodore Ts'o | 9d0be50 | 2010-01-01 02:41:30 -0500 | [diff] [blame] | 1862 | unsigned long md_needed, md_reserved; | 
| Christoph Hellwig | 5dd4056 | 2010-03-03 09:05:00 -0500 | [diff] [blame] | 1863 | int ret; | 
| Mingming Cao | d2a1763 | 2008-07-14 17:52:37 -0400 | [diff] [blame] | 1864 |  | 
|  | 1865 | /* | 
|  | 1866 | * Recalculate the number of metadata blocks to reserve | 
|  | 1867 | * in order to allocate nrblocks; | 
|  | 1868 | * the worst case is one extent per block. | 
|  | 1869 | */ | 
| Aneesh Kumar K.V | 030ba6b | 2008-09-08 23:14:50 -0400 | [diff] [blame] | 1870 | repeat: | 
| Theodore Ts'o | 0637c6f | 2009-12-30 14:20:45 -0500 | [diff] [blame] | 1871 | spin_lock(&ei->i_block_reservation_lock); | 
|  | 1872 | md_reserved = ei->i_reserved_meta_blocks; | 
| Theodore Ts'o | 9d0be50 | 2010-01-01 02:41:30 -0500 | [diff] [blame] | 1873 | md_needed = ext4_calc_metadata_amount(inode, lblock); | 
| Theodore Ts'o | f8ec9d6 | 2010-01-01 01:00:21 -0500 | [diff] [blame] | 1874 | trace_ext4_da_reserve_space(inode, md_needed); | 
| Theodore Ts'o | 0637c6f | 2009-12-30 14:20:45 -0500 | [diff] [blame] | 1875 | spin_unlock(&ei->i_block_reservation_lock); | 
| Mingming Cao | d2a1763 | 2008-07-14 17:52:37 -0400 | [diff] [blame] | 1876 |  | 
| Mingming Cao | 60e58e0 | 2009-01-22 18:13:05 +0100 | [diff] [blame] | 1877 | /* | 
|  | 1878 | * Make quota reservation here to prevent quota overflow | 
|  | 1879 | * later. Real quota accounting is done at pages writeout | 
|  | 1880 | * time. | 
|  | 1881 | */ | 
| Christoph Hellwig | 5dd4056 | 2010-03-03 09:05:00 -0500 | [diff] [blame] | 1882 | ret = dquot_reserve_block(inode, md_needed + 1); | 
|  | 1883 | if (ret) | 
|  | 1884 | return ret; | 
| Mingming Cao | 60e58e0 | 2009-01-22 18:13:05 +0100 | [diff] [blame] | 1885 |  | 
| Theodore Ts'o | 9d0be50 | 2010-01-01 02:41:30 -0500 | [diff] [blame] | 1886 | if (ext4_claim_free_blocks(sbi, md_needed + 1)) { | 
| Christoph Hellwig | 5dd4056 | 2010-03-03 09:05:00 -0500 | [diff] [blame] | 1887 | dquot_release_reservation_block(inode, md_needed + 1); | 
| Aneesh Kumar K.V | 030ba6b | 2008-09-08 23:14:50 -0400 | [diff] [blame] | 1888 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { | 
|  | 1889 | yield(); | 
|  | 1890 | goto repeat; | 
|  | 1891 | } | 
| Mingming Cao | d2a1763 | 2008-07-14 17:52:37 -0400 | [diff] [blame] | 1892 | return -ENOSPC; | 
|  | 1893 | } | 
| Theodore Ts'o | 0637c6f | 2009-12-30 14:20:45 -0500 | [diff] [blame] | 1894 | spin_lock(&ei->i_block_reservation_lock); | 
| Theodore Ts'o | 9d0be50 | 2010-01-01 02:41:30 -0500 | [diff] [blame] | 1895 | ei->i_reserved_data_blocks++; | 
| Theodore Ts'o | 0637c6f | 2009-12-30 14:20:45 -0500 | [diff] [blame] | 1896 | ei->i_reserved_meta_blocks += md_needed; | 
|  | 1897 | spin_unlock(&ei->i_block_reservation_lock); | 
| Dmitry Monakhov | 39bc680 | 2009-12-10 16:36:27 +0000 | [diff] [blame] | 1898 |  | 
| Mingming Cao | d2a1763 | 2008-07-14 17:52:37 -0400 | [diff] [blame] | 1899 | return 0;       /* success */ | 
|  | 1900 | } | 
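The reservation above follows a fixed order: take the quota reservation first, then claim space from the filesystem's free-block counter, and on failure hand the quota reservation back before retrying or giving up with -ENOSPC. Below is a minimal user-space sketch of that ordering and retry loop; the counters, helper names, and retry limit are invented for illustration and are not ext4 APIs.

#include <stdio.h>
#include <errno.h>

static long quota_left = 4;	/* hypothetical per-user quota reservation pool */
static long free_left  = 2;	/* hypothetical filesystem free-block counter */

static int reserve_quota(long n)
{
	if (quota_left < n)
		return -EDQUOT;
	quota_left -= n;
	return 0;
}

static void release_quota(long n)
{
	quota_left += n;
}

static int claim_free(long n)
{
	if (free_left < n)
		return -ENOSPC;
	free_left -= n;
	return 0;
}

/* mirrors the ordering above: quota first, free blocks second,
 * roll back the quota reservation if the free-block claim fails */
static int da_reserve(long blocks, int max_retries)
{
	int retries = 0, ret;

	do {
		ret = reserve_quota(blocks);
		if (ret)
			return ret;
		if (claim_free(blocks) == 0)
			return 0;		/* both reservations held */
		release_quota(blocks);		/* undo step one before retrying */
	} while (++retries < max_retries);

	return -ENOSPC;
}

int main(void)
{
	printf("reserve 1 block: %d\n", da_reserve(1, 3));
	printf("reserve 8 blocks: %d\n", da_reserve(8, 3));
	return 0;
}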
|  | 1901 |  | 
| Aneesh Kumar K.V | 12219ae | 2008-07-17 16:12:08 -0400 | [diff] [blame] | 1902 | static void ext4_da_release_space(struct inode *inode, int to_free) | 
| Mingming Cao | d2a1763 | 2008-07-14 17:52:37 -0400 | [diff] [blame] | 1903 | { | 
|  | 1904 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 
| Theodore Ts'o | 0637c6f | 2009-12-30 14:20:45 -0500 | [diff] [blame] | 1905 | struct ext4_inode_info *ei = EXT4_I(inode); | 
| Mingming Cao | d2a1763 | 2008-07-14 17:52:37 -0400 | [diff] [blame] | 1906 |  | 
| Mingming Cao | cd21322 | 2008-08-19 22:16:59 -0400 | [diff] [blame] | 1907 | if (!to_free) | 
|  | 1908 | return;		/* Nothing to release, exit */ | 
|  | 1909 |  | 
| Mingming Cao | d2a1763 | 2008-07-14 17:52:37 -0400 | [diff] [blame] | 1910 | spin_lock(&EXT4_I(inode)->i_block_reservation_lock); | 
| Mingming Cao | cd21322 | 2008-08-19 22:16:59 -0400 | [diff] [blame] | 1911 |  | 
| Theodore Ts'o | 0637c6f | 2009-12-30 14:20:45 -0500 | [diff] [blame] | 1912 | if (unlikely(to_free > ei->i_reserved_data_blocks)) { | 
| Mingming Cao | cd21322 | 2008-08-19 22:16:59 -0400 | [diff] [blame] | 1913 | /* | 
| Theodore Ts'o | 0637c6f | 2009-12-30 14:20:45 -0500 | [diff] [blame] | 1914 | * if there aren't enough reserved blocks, then the | 
|  | 1915 | * counter is messed up somewhere.  Since this | 
|  | 1916 | * function is called from invalidatepage, it's | 
|  | 1917 | * harmless to return without any action. | 
| Mingming Cao | cd21322 | 2008-08-19 22:16:59 -0400 | [diff] [blame] | 1918 | */ | 
| Theodore Ts'o | 0637c6f | 2009-12-30 14:20:45 -0500 | [diff] [blame] | 1919 | ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: " | 
|  | 1920 | "ino %lu, to_free %d with only %d reserved " | 
|  | 1921 | "data blocks\n", inode->i_ino, to_free, | 
|  | 1922 | ei->i_reserved_data_blocks); | 
|  | 1923 | WARN_ON(1); | 
|  | 1924 | to_free = ei->i_reserved_data_blocks; | 
|  | 1925 | } | 
|  | 1926 | ei->i_reserved_data_blocks -= to_free; | 
|  | 1927 |  | 
|  | 1928 | if (ei->i_reserved_data_blocks == 0) { | 
|  | 1929 | /* | 
|  | 1930 | * We can release all of the reserved metadata blocks | 
|  | 1931 | * only when we have written all of the delayed | 
|  | 1932 | * allocation blocks. | 
|  | 1933 | */ | 
| Theodore Ts'o | ee5f4d9 | 2010-01-01 02:36:15 -0500 | [diff] [blame] | 1934 | to_free += ei->i_reserved_meta_blocks; | 
|  | 1935 | ei->i_reserved_meta_blocks = 0; | 
| Theodore Ts'o | 9d0be50 | 2010-01-01 02:41:30 -0500 | [diff] [blame] | 1936 | ei->i_da_metadata_calc_len = 0; | 
| Mingming Cao | cd21322 | 2008-08-19 22:16:59 -0400 | [diff] [blame] | 1937 | } | 
|  | 1938 |  | 
| Theodore Ts'o | 0637c6f | 2009-12-30 14:20:45 -0500 | [diff] [blame] | 1939 | /* update fs dirty blocks counter */ | 
|  | 1940 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, to_free); | 
| Mingming Cao | d2a1763 | 2008-07-14 17:52:37 -0400 | [diff] [blame] | 1941 |  | 
| Mingming Cao | d2a1763 | 2008-07-14 17:52:37 -0400 | [diff] [blame] | 1942 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | 
| Mingming Cao | 60e58e0 | 2009-01-22 18:13:05 +0100 | [diff] [blame] | 1943 |  | 
| Christoph Hellwig | 5dd4056 | 2010-03-03 09:05:00 -0500 | [diff] [blame] | 1944 | dquot_release_reservation_block(inode, to_free); | 
| Mingming Cao | d2a1763 | 2008-07-14 17:52:37 -0400 | [diff] [blame] | 1945 | } | 
|  | 1946 |  | 
|  | 1947 | static void ext4_da_page_release_reservation(struct page *page, | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 1948 | unsigned long offset) | 
| Mingming Cao | d2a1763 | 2008-07-14 17:52:37 -0400 | [diff] [blame] | 1949 | { | 
|  | 1950 | int to_release = 0; | 
|  | 1951 | struct buffer_head *head, *bh; | 
|  | 1952 | unsigned int curr_off = 0; | 
|  | 1953 |  | 
|  | 1954 | head = page_buffers(page); | 
|  | 1955 | bh = head; | 
|  | 1956 | do { | 
|  | 1957 | unsigned int next_off = curr_off + bh->b_size; | 
|  | 1958 |  | 
|  | 1959 | if ((offset <= curr_off) && (buffer_delay(bh))) { | 
|  | 1960 | to_release++; | 
|  | 1961 | clear_buffer_delay(bh); | 
|  | 1962 | } | 
|  | 1963 | curr_off = next_off; | 
|  | 1964 | } while ((bh = bh->b_this_page) != head); | 
| Aneesh Kumar K.V | 12219ae | 2008-07-17 16:12:08 -0400 | [diff] [blame] | 1965 | ext4_da_release_space(page->mapping->host, to_release); | 
| Mingming Cao | d2a1763 | 2008-07-14 17:52:37 -0400 | [diff] [blame] | 1966 | } | 
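ext4_da_page_release_reservation() walks the page's buffers in file order; every buffer that starts at or beyond the invalidation offset and still carries the delayed bit has the bit cleared and contributes one reservation to release. A small stand-alone sketch of that counting walk, with an invented structure and flag standing in for buffer_head and BH_Delay:

#include <stdio.h>

#define FAKE_BH_DELAY	0x1	/* hypothetical stand-in for BH_Delay */

struct fake_bh {
	unsigned int size;
	unsigned int state;
};

static int count_to_release(struct fake_bh *bhs, int nr, unsigned int offset)
{
	unsigned int curr_off = 0;
	int i, to_release = 0;

	for (i = 0; i < nr; i++) {
		unsigned int next_off = curr_off + bhs[i].size;

		/* buffer begins at or after the invalidation offset and is delayed */
		if (offset <= curr_off && (bhs[i].state & FAKE_BH_DELAY)) {
			bhs[i].state &= ~FAKE_BH_DELAY;
			to_release++;
		}
		curr_off = next_off;
	}
	return to_release;
}

int main(void)
{
	struct fake_bh page[4] = {
		{1024, FAKE_BH_DELAY}, {1024, 0},
		{1024, FAKE_BH_DELAY}, {1024, FAKE_BH_DELAY},
	};

	/* invalidate from byte offset 2048: only the last two buffers count */
	printf("to_release = %d\n", count_to_release(page, 4, 2048));
	return 0;
}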
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 1967 |  | 
|  | 1968 | /* | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 1969 | * Delayed allocation stuff | 
|  | 1970 | */ | 
|  | 1971 |  | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 1972 | /* | 
|  | 1973 | * mpage_da_submit_io - walks through an extent of pages and tries to write | 
| Aneesh Kumar K.V | a1d6cc5 | 2008-08-19 21:55:02 -0400 | [diff] [blame] | 1974 | * them with the writepage() callback | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 1975 | * | 
|  | 1976 | * @mpd->inode: inode | 
|  | 1977 | * @mpd->first_page: first page of the extent | 
|  | 1978 | * @mpd->next_page: page after the last page of the extent | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 1979 | * | 
|  | 1980 | * By the time mpage_da_submit_io() is called we expect all blocks | 
|  | 1981 | * to be allocated. This may be wrong if allocation failed. | 
|  | 1982 | * | 
|  | 1983 | * As pages are already locked by write_cache_pages(), we can't use it | 
|  | 1984 | */ | 
|  | 1985 | static int mpage_da_submit_io(struct mpage_da_data *mpd) | 
|  | 1986 | { | 
| Aneesh Kumar K.V | 22208de | 2008-10-16 10:10:36 -0400 | [diff] [blame] | 1987 | long pages_skipped; | 
| Aneesh Kumar K.V | 791b7f0 | 2009-01-05 21:50:43 -0500 | [diff] [blame] | 1988 | struct pagevec pvec; | 
|  | 1989 | unsigned long index, end; | 
|  | 1990 | int ret = 0, err, nr_pages, i; | 
|  | 1991 | struct inode *inode = mpd->inode; | 
|  | 1992 | struct address_space *mapping = inode->i_mapping; | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 1993 |  | 
|  | 1994 | BUG_ON(mpd->next_page <= mpd->first_page); | 
| Aneesh Kumar K.V | 791b7f0 | 2009-01-05 21:50:43 -0500 | [diff] [blame] | 1995 | /* | 
|  | 1996 | * We need to start from the first_page to the next_page - 1 | 
|  | 1997 | * to make sure we also write the mapped dirty buffer_heads. | 
| Theodore Ts'o | 8dc207c | 2009-02-23 06:46:01 -0500 | [diff] [blame] | 1998 | * If we looked at mpd->b_blocknr we would only be looking | 
| Aneesh Kumar K.V | 791b7f0 | 2009-01-05 21:50:43 -0500 | [diff] [blame] | 1999 | * at the currently mapped buffer_heads. | 
|  | 2000 | */ | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2001 | index = mpd->first_page; | 
|  | 2002 | end = mpd->next_page - 1; | 
|  | 2003 |  | 
| Aneesh Kumar K.V | 791b7f0 | 2009-01-05 21:50:43 -0500 | [diff] [blame] | 2004 | pagevec_init(&pvec, 0); | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2005 | while (index <= end) { | 
| Aneesh Kumar K.V | 791b7f0 | 2009-01-05 21:50:43 -0500 | [diff] [blame] | 2006 | nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2007 | if (nr_pages == 0) | 
|  | 2008 | break; | 
|  | 2009 | for (i = 0; i < nr_pages; i++) { | 
|  | 2010 | struct page *page = pvec.pages[i]; | 
|  | 2011 |  | 
| Aneesh Kumar K.V | 791b7f0 | 2009-01-05 21:50:43 -0500 | [diff] [blame] | 2012 | index = page->index; | 
|  | 2013 | if (index > end) | 
|  | 2014 | break; | 
|  | 2015 | index++; | 
|  | 2016 |  | 
|  | 2017 | BUG_ON(!PageLocked(page)); | 
|  | 2018 | BUG_ON(PageWriteback(page)); | 
|  | 2019 |  | 
| Aneesh Kumar K.V | 22208de | 2008-10-16 10:10:36 -0400 | [diff] [blame] | 2020 | pages_skipped = mpd->wbc->pages_skipped; | 
| Aneesh Kumar K.V | a1d6cc5 | 2008-08-19 21:55:02 -0400 | [diff] [blame] | 2021 | err = mapping->a_ops->writepage(page, mpd->wbc); | 
| Aneesh Kumar K.V | 22208de | 2008-10-16 10:10:36 -0400 | [diff] [blame] | 2022 | if (!err && (pages_skipped == mpd->wbc->pages_skipped)) | 
|  | 2023 | /* | 
|  | 2024 | * we have successfully written the page | 
|  | 2025 | * without skipping it | 
|  | 2026 | */ | 
| Aneesh Kumar K.V | a1d6cc5 | 2008-08-19 21:55:02 -0400 | [diff] [blame] | 2027 | mpd->pages_written++; | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2028 | /* | 
|  | 2029 | * In the error case, we have to continue because | 
|  | 2030 | * the remaining pages are still locked | 
|  | 2031 | * XXX: unlock and re-dirty them? | 
|  | 2032 | */ | 
|  | 2033 | if (ret == 0) | 
|  | 2034 | ret = err; | 
|  | 2035 | } | 
|  | 2036 | pagevec_release(&pvec); | 
|  | 2037 | } | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2038 | return ret; | 
|  | 2039 | } | 
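mpage_da_submit_io() covers the [first_page, next_page) range in batches: look up at most a pagevec's worth of pages from the current index, write each one, and re-derive the next index from the pages actually returned. A rough user-space sketch of that batched walk, with a made-up lookup function standing in for pagevec_lookup():

#include <stdio.h>

#define BATCH 4		/* stands in for PAGEVEC_SIZE */

/* pretend the mapping has a page at every index up to 100 */
static int lookup_pages(unsigned long start, unsigned long *out, int max)
{
	int n = 0;

	while (n < max && start + n <= 100) {
		out[n] = start + n;
		n++;
	}
	return n;
}

int main(void)
{
	unsigned long index = 3, end = 10, pages[BATCH];
	int nr, i;

	while (index <= end) {
		nr = lookup_pages(index, pages, BATCH);
		if (nr == 0)
			break;
		for (i = 0; i < nr; i++) {
			if (pages[i] > end)
				break;
			index = pages[i] + 1;	/* advance past what we just saw */
			printf("write page %lu\n", pages[i]);
		}
	}
	return 0;
}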
|  | 2040 |  | 
|  | 2041 | /* | 
|  | 2042 | * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers | 
|  | 2043 | * | 
|  | 2044 | * @mpd->inode - inode to walk through | 
|  | 2045 | * @exbh->b_blocknr - first block on a disk | 
|  | 2046 | * @exbh->b_size - amount of space in bytes | 
|  | 2047 | * @logical - first logical block to start assignment with | 
|  | 2048 | * | 
|  | 2049 | * the function goes through all the passed space and puts actual disk | 
| Aneesh Kumar K.V | 29fa89d | 2009-05-12 16:30:27 -0400 | [diff] [blame] | 2050 | * block numbers into buffer heads, dropping BH_Delay and BH_Unwritten | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2051 | */ | 
|  | 2052 | static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical, | 
|  | 2053 | struct buffer_head *exbh) | 
|  | 2054 | { | 
|  | 2055 | struct inode *inode = mpd->inode; | 
|  | 2056 | struct address_space *mapping = inode->i_mapping; | 
|  | 2057 | int blocks = exbh->b_size >> inode->i_blkbits; | 
|  | 2058 | sector_t pblock = exbh->b_blocknr, cur_logical; | 
|  | 2059 | struct buffer_head *head, *bh; | 
| Aneesh Kumar K.V | a1d6cc5 | 2008-08-19 21:55:02 -0400 | [diff] [blame] | 2060 | pgoff_t index, end; | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2061 | struct pagevec pvec; | 
|  | 2062 | int nr_pages, i; | 
|  | 2063 |  | 
|  | 2064 | index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits); | 
|  | 2065 | end = (logical + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits); | 
|  | 2066 | cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); | 
|  | 2067 |  | 
|  | 2068 | pagevec_init(&pvec, 0); | 
|  | 2069 |  | 
|  | 2070 | while (index <= end) { | 
|  | 2071 | /* XXX: optimize tail */ | 
|  | 2072 | nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); | 
|  | 2073 | if (nr_pages == 0) | 
|  | 2074 | break; | 
|  | 2075 | for (i = 0; i < nr_pages; i++) { | 
|  | 2076 | struct page *page = pvec.pages[i]; | 
|  | 2077 |  | 
|  | 2078 | index = page->index; | 
|  | 2079 | if (index > end) | 
|  | 2080 | break; | 
|  | 2081 | index++; | 
|  | 2082 |  | 
|  | 2083 | BUG_ON(!PageLocked(page)); | 
|  | 2084 | BUG_ON(PageWriteback(page)); | 
|  | 2085 | BUG_ON(!page_has_buffers(page)); | 
|  | 2086 |  | 
|  | 2087 | bh = page_buffers(page); | 
|  | 2088 | head = bh; | 
|  | 2089 |  | 
|  | 2090 | /* skip blocks out of the range */ | 
|  | 2091 | do { | 
|  | 2092 | if (cur_logical >= logical) | 
|  | 2093 | break; | 
|  | 2094 | cur_logical++; | 
|  | 2095 | } while ((bh = bh->b_this_page) != head); | 
|  | 2096 |  | 
|  | 2097 | do { | 
|  | 2098 | if (cur_logical >= logical + blocks) | 
|  | 2099 | break; | 
| Aneesh Kumar K.V | 29fa89d | 2009-05-12 16:30:27 -0400 | [diff] [blame] | 2100 |  | 
|  | 2101 | if (buffer_delay(bh) || | 
|  | 2102 | buffer_unwritten(bh)) { | 
|  | 2103 |  | 
|  | 2104 | BUG_ON(bh->b_bdev != inode->i_sb->s_bdev); | 
|  | 2105 |  | 
|  | 2106 | if (buffer_delay(bh)) { | 
|  | 2107 | clear_buffer_delay(bh); | 
|  | 2108 | bh->b_blocknr = pblock; | 
|  | 2109 | } else { | 
|  | 2110 | /* | 
|  | 2111 | * An unwritten buffer should already have | 
|  | 2112 | * its blocknr assigned. Verify that. | 
|  | 2113 | */ | 
|  | 2114 | clear_buffer_unwritten(bh); | 
|  | 2115 | BUG_ON(bh->b_blocknr != pblock); | 
|  | 2116 | } | 
|  | 2117 |  | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2118 | } else if (buffer_mapped(bh)) | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2119 | BUG_ON(bh->b_blocknr != pblock); | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2120 |  | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 2121 | if (buffer_uninit(exbh)) | 
|  | 2122 | set_buffer_uninit(bh); | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2123 | cur_logical++; | 
|  | 2124 | pblock++; | 
|  | 2125 | } while ((bh = bh->b_this_page) != head); | 
|  | 2126 | } | 
|  | 2127 | pagevec_release(&pvec); | 
|  | 2128 | } | 
|  | 2129 | } | 
|  | 2130 |  | 
|  | 2131 |  | 
|  | 2132 | /* | 
|  | 2133 | * __unmap_underlying_blocks - just a helper function to unmap | 
|  | 2134 | * set of blocks described by @bh | 
|  | 2135 | */ | 
|  | 2136 | static inline void __unmap_underlying_blocks(struct inode *inode, | 
|  | 2137 | struct buffer_head *bh) | 
|  | 2138 | { | 
|  | 2139 | struct block_device *bdev = inode->i_sb->s_bdev; | 
|  | 2140 | int blocks, i; | 
|  | 2141 |  | 
|  | 2142 | blocks = bh->b_size >> inode->i_blkbits; | 
|  | 2143 | for (i = 0; i < blocks; i++) | 
|  | 2144 | unmap_underlying_metadata(bdev, bh->b_blocknr + i); | 
|  | 2145 | } | 
|  | 2146 |  | 
| Aneesh Kumar K.V | c4a0c46 | 2008-08-19 21:08:18 -0400 | [diff] [blame] | 2147 | static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd, | 
|  | 2148 | sector_t logical, long blk_cnt) | 
|  | 2149 | { | 
|  | 2150 | int nr_pages, i; | 
|  | 2151 | pgoff_t index, end; | 
|  | 2152 | struct pagevec pvec; | 
|  | 2153 | struct inode *inode = mpd->inode; | 
|  | 2154 | struct address_space *mapping = inode->i_mapping; | 
|  | 2155 |  | 
|  | 2156 | index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits); | 
|  | 2157 | end   = (logical + blk_cnt - 1) >> | 
|  | 2158 | (PAGE_CACHE_SHIFT - inode->i_blkbits); | 
|  | 2159 | while (index <= end) { | 
|  | 2160 | nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); | 
|  | 2161 | if (nr_pages == 0) | 
|  | 2162 | break; | 
|  | 2163 | for (i = 0; i < nr_pages; i++) { | 
|  | 2164 | struct page *page = pvec.pages[i]; | 
| Jan Kara | 9b1d0998 | 2010-03-03 16:19:32 -0500 | [diff] [blame] | 2165 | if (page->index > end) | 
| Aneesh Kumar K.V | c4a0c46 | 2008-08-19 21:08:18 -0400 | [diff] [blame] | 2166 | break; | 
| Aneesh Kumar K.V | c4a0c46 | 2008-08-19 21:08:18 -0400 | [diff] [blame] | 2167 | BUG_ON(!PageLocked(page)); | 
|  | 2168 | BUG_ON(PageWriteback(page)); | 
|  | 2169 | block_invalidatepage(page, 0); | 
|  | 2170 | ClearPageUptodate(page); | 
|  | 2171 | unlock_page(page); | 
|  | 2172 | } | 
| Jan Kara | 9b1d0998 | 2010-03-03 16:19:32 -0500 | [diff] [blame] | 2173 | index = pvec.pages[nr_pages - 1]->index + 1; | 
|  | 2174 | pagevec_release(&pvec); | 
| Aneesh Kumar K.V | c4a0c46 | 2008-08-19 21:08:18 -0400 | [diff] [blame] | 2175 | } | 
|  | 2176 | return; | 
|  | 2177 | } | 
|  | 2178 |  | 
| Aneesh Kumar K.V | df22291 | 2008-09-08 23:05:34 -0400 | [diff] [blame] | 2179 | static void ext4_print_free_blocks(struct inode *inode) | 
|  | 2180 | { | 
|  | 2181 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 
| Theodore Ts'o | 1693918 | 2009-09-26 17:43:59 -0400 | [diff] [blame] | 2182 | printk(KERN_CRIT "Total free blocks count %lld\n", | 
|  | 2183 | ext4_count_free_blocks(inode->i_sb)); | 
|  | 2184 | printk(KERN_CRIT "Free/Dirty block details\n"); | 
|  | 2185 | printk(KERN_CRIT "free_blocks=%lld\n", | 
|  | 2186 | (long long) percpu_counter_sum(&sbi->s_freeblocks_counter)); | 
|  | 2187 | printk(KERN_CRIT "dirty_blocks=%lld\n", | 
|  | 2188 | (long long) percpu_counter_sum(&sbi->s_dirtyblocks_counter)); | 
|  | 2189 | printk(KERN_CRIT "Block reservation details\n"); | 
|  | 2190 | printk(KERN_CRIT "i_reserved_data_blocks=%u\n", | 
|  | 2191 | EXT4_I(inode)->i_reserved_data_blocks); | 
|  | 2192 | printk(KERN_CRIT "i_reserved_meta_blocks=%u\n", | 
|  | 2193 | EXT4_I(inode)->i_reserved_meta_blocks); | 
| Aneesh Kumar K.V | df22291 | 2008-09-08 23:05:34 -0400 | [diff] [blame] | 2194 | return; | 
|  | 2195 | } | 
|  | 2196 |  | 
| Theodore Ts'o | b920c75 | 2009-05-14 00:54:29 -0400 | [diff] [blame] | 2197 | /* | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2198 | * mpage_da_map_blocks - go through given space | 
|  | 2199 | * | 
| Theodore Ts'o | 8dc207c | 2009-02-23 06:46:01 -0500 | [diff] [blame] | 2200 | * @mpd - bh describing space | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2201 | * | 
|  | 2202 | * The function skips space we know is already mapped to disk blocks. | 
|  | 2203 | * | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2204 | */ | 
| Theodore Ts'o | ed5bde0 | 2009-02-23 10:48:07 -0500 | [diff] [blame] | 2205 | static int mpage_da_map_blocks(struct mpage_da_data *mpd) | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2206 | { | 
| Theodore Ts'o | 2ac3b6e | 2009-05-14 13:57:08 -0400 | [diff] [blame] | 2207 | int err, blks, get_blocks_flags; | 
| Aneesh Kumar K.V | 030ba6b | 2008-09-08 23:14:50 -0400 | [diff] [blame] | 2208 | struct buffer_head new; | 
| Theodore Ts'o | 2fa3cdf | 2009-05-14 09:29:45 -0400 | [diff] [blame] | 2209 | sector_t next = mpd->b_blocknr; | 
|  | 2210 | unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits; | 
|  | 2211 | loff_t disksize = EXT4_I(mpd->inode)->i_disksize; | 
|  | 2212 | handle_t *handle = NULL; | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2213 |  | 
|  | 2214 | /* | 
|  | 2215 | * We consider only non-mapped and non-allocated blocks | 
|  | 2216 | */ | 
| Theodore Ts'o | 8dc207c | 2009-02-23 06:46:01 -0500 | [diff] [blame] | 2217 | if ((mpd->b_state  & (1 << BH_Mapped)) && | 
| Aneesh Kumar K.V | 29fa89d | 2009-05-12 16:30:27 -0400 | [diff] [blame] | 2218 | !(mpd->b_state & (1 << BH_Delay)) && | 
|  | 2219 | !(mpd->b_state & (1 << BH_Unwritten))) | 
| Aneesh Kumar K.V | c4a0c46 | 2008-08-19 21:08:18 -0400 | [diff] [blame] | 2220 | return 0; | 
| Theodore Ts'o | 2fa3cdf | 2009-05-14 09:29:45 -0400 | [diff] [blame] | 2221 |  | 
|  | 2222 | /* | 
|  | 2223 | * If we didn't accumulate anything to write, simply return | 
|  | 2224 | */ | 
|  | 2225 | if (!mpd->b_size) | 
|  | 2226 | return 0; | 
|  | 2227 |  | 
|  | 2228 | handle = ext4_journal_current_handle(); | 
|  | 2229 | BUG_ON(!handle); | 
|  | 2230 |  | 
| Aneesh Kumar K.V | 79ffab3 | 2009-05-13 15:13:42 -0400 | [diff] [blame] | 2231 | /* | 
| Theodore Ts'o | 2ac3b6e | 2009-05-14 13:57:08 -0400 | [diff] [blame] | 2232 | * Call ext4_get_blocks() to allocate any delayed allocation | 
|  | 2233 | * blocks, or to convert an uninitialized extent to be | 
|  | 2234 | * initialized (in the case where we have written into | 
|  | 2235 | * one or more preallocated blocks). | 
|  | 2236 | * | 
|  | 2237 | * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to | 
|  | 2238 | * indicate that we are on the delayed allocation path.  This | 
|  | 2239 | * affects functions in many different parts of the allocation | 
|  | 2240 | * call path.  This flag exists primarily because we don't | 
|  | 2241 | * want to change *many* call functions, so ext4_get_blocks() | 
|  | 2242 | * will set the magic i_delalloc_reserved_flag once the | 
|  | 2243 | * inode's allocation semaphore is taken. | 
|  | 2244 | * | 
|  | 2245 | * If the blocks in questions were delalloc blocks, set | 
|  | 2246 | * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting | 
|  | 2247 | * variables are updated after the blocks have been allocated. | 
| Aneesh Kumar K.V | 79ffab3 | 2009-05-13 15:13:42 -0400 | [diff] [blame] | 2248 | */ | 
| Theodore Ts'o | 2ac3b6e | 2009-05-14 13:57:08 -0400 | [diff] [blame] | 2249 | new.b_state = 0; | 
| Aneesh Kumar K.V | 1296cc8 | 2010-01-15 01:27:59 -0500 | [diff] [blame] | 2250 | get_blocks_flags = EXT4_GET_BLOCKS_CREATE; | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 2251 | if (ext4_should_dioread_nolock(mpd->inode)) | 
|  | 2252 | get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; | 
| Theodore Ts'o | 2ac3b6e | 2009-05-14 13:57:08 -0400 | [diff] [blame] | 2253 | if (mpd->b_state & (1 << BH_Delay)) | 
| Aneesh Kumar K.V | 1296cc8 | 2010-01-15 01:27:59 -0500 | [diff] [blame] | 2254 | get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; | 
|  | 2255 |  | 
| Theodore Ts'o | 2fa3cdf | 2009-05-14 09:29:45 -0400 | [diff] [blame] | 2256 | blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks, | 
| Theodore Ts'o | 2ac3b6e | 2009-05-14 13:57:08 -0400 | [diff] [blame] | 2257 | &new, get_blocks_flags); | 
| Theodore Ts'o | 2fa3cdf | 2009-05-14 09:29:45 -0400 | [diff] [blame] | 2258 | if (blks < 0) { | 
|  | 2259 | err = blks; | 
| Theodore Ts'o | ed5bde0 | 2009-02-23 10:48:07 -0500 | [diff] [blame] | 2260 | /* | 
|  | 2261 | * If the get block call returns an error, we simply | 
|  | 2262 | * return. Later, writepage will redirty the page and | 
|  | 2263 | * writepages will find the dirty page again. | 
| Aneesh Kumar K.V | c4a0c46 | 2008-08-19 21:08:18 -0400 | [diff] [blame] | 2264 | */ | 
|  | 2265 | if (err == -EAGAIN) | 
|  | 2266 | return 0; | 
| Aneesh Kumar K.V | df22291 | 2008-09-08 23:05:34 -0400 | [diff] [blame] | 2267 |  | 
|  | 2268 | if (err == -ENOSPC && | 
| Theodore Ts'o | ed5bde0 | 2009-02-23 10:48:07 -0500 | [diff] [blame] | 2269 | ext4_count_free_blocks(mpd->inode->i_sb)) { | 
| Aneesh Kumar K.V | df22291 | 2008-09-08 23:05:34 -0400 | [diff] [blame] | 2270 | mpd->retval = err; | 
|  | 2271 | return 0; | 
|  | 2272 | } | 
|  | 2273 |  | 
| Aneesh Kumar K.V | c4a0c46 | 2008-08-19 21:08:18 -0400 | [diff] [blame] | 2274 | /* | 
| Theodore Ts'o | ed5bde0 | 2009-02-23 10:48:07 -0500 | [diff] [blame] | 2275 | * get block failure will cause us to loop in | 
|  | 2276 | * writepages, because a_ops->writepage won't be able | 
|  | 2277 | * to make progress. The page will be redirtied by | 
|  | 2278 | * writepage and writepages will again try to write | 
|  | 2279 | * the same. | 
| Aneesh Kumar K.V | c4a0c46 | 2008-08-19 21:08:18 -0400 | [diff] [blame] | 2280 | */ | 
| Theodore Ts'o | 1693918 | 2009-09-26 17:43:59 -0400 | [diff] [blame] | 2281 | ext4_msg(mpd->inode->i_sb, KERN_CRIT, | 
|  | 2282 | "delayed block allocation failed for inode %lu at " | 
|  | 2283 | "logical offset %llu with max blocks %zd with " | 
|  | 2284 | "error %d\n", mpd->inode->i_ino, | 
|  | 2285 | (unsigned long long) next, | 
|  | 2286 | mpd->b_size >> mpd->inode->i_blkbits, err); | 
|  | 2287 | printk(KERN_CRIT "This should not happen!!  " | 
|  | 2288 | "Data will be lost\n"); | 
| Aneesh Kumar K.V | 030ba6b | 2008-09-08 23:14:50 -0400 | [diff] [blame] | 2289 | if (err == -ENOSPC) { | 
| Aneesh Kumar K.V | df22291 | 2008-09-08 23:05:34 -0400 | [diff] [blame] | 2290 | ext4_print_free_blocks(mpd->inode); | 
| Aneesh Kumar K.V | 030ba6b | 2008-09-08 23:14:50 -0400 | [diff] [blame] | 2291 | } | 
| Theodore Ts'o | 2fa3cdf | 2009-05-14 09:29:45 -0400 | [diff] [blame] | 2292 | /* invalidate all the pages */ | 
| Aneesh Kumar K.V | c4a0c46 | 2008-08-19 21:08:18 -0400 | [diff] [blame] | 2293 | ext4_da_block_invalidatepages(mpd, next, | 
| Theodore Ts'o | 8dc207c | 2009-02-23 06:46:01 -0500 | [diff] [blame] | 2294 | mpd->b_size >> mpd->inode->i_blkbits); | 
| Aneesh Kumar K.V | c4a0c46 | 2008-08-19 21:08:18 -0400 | [diff] [blame] | 2295 | return err; | 
|  | 2296 | } | 
| Theodore Ts'o | 2fa3cdf | 2009-05-14 09:29:45 -0400 | [diff] [blame] | 2297 | BUG_ON(blks == 0); | 
|  | 2298 |  | 
|  | 2299 | new.b_size = (blks << mpd->inode->i_blkbits); | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2300 |  | 
| Aneesh Kumar K.V | a1d6cc5 | 2008-08-19 21:55:02 -0400 | [diff] [blame] | 2301 | if (buffer_new(&new)) | 
|  | 2302 | __unmap_underlying_blocks(mpd->inode, &new); | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2303 |  | 
| Aneesh Kumar K.V | a1d6cc5 | 2008-08-19 21:55:02 -0400 | [diff] [blame] | 2304 | /* | 
|  | 2305 | * If blocks are marked delayed, we need to | 
|  | 2306 | * put in the actual blocknr and drop the delayed bit | 
|  | 2307 | */ | 
| Theodore Ts'o | 8dc207c | 2009-02-23 06:46:01 -0500 | [diff] [blame] | 2308 | if ((mpd->b_state & (1 << BH_Delay)) || | 
|  | 2309 | (mpd->b_state & (1 << BH_Unwritten))) | 
| Aneesh Kumar K.V | a1d6cc5 | 2008-08-19 21:55:02 -0400 | [diff] [blame] | 2310 | mpage_put_bnr_to_bhs(mpd, next, &new); | 
|  | 2311 |  | 
| Theodore Ts'o | 2fa3cdf | 2009-05-14 09:29:45 -0400 | [diff] [blame] | 2312 | if (ext4_should_order_data(mpd->inode)) { | 
|  | 2313 | err = ext4_jbd2_file_inode(handle, mpd->inode); | 
|  | 2314 | if (err) | 
|  | 2315 | return err; | 
|  | 2316 | } | 
|  | 2317 |  | 
|  | 2318 | /* | 
| Jan Kara | 03f5d8b | 2009-06-09 00:17:05 -0400 | [diff] [blame] | 2319 | * Update on-disk size along with block allocation. | 
| Theodore Ts'o | 2fa3cdf | 2009-05-14 09:29:45 -0400 | [diff] [blame] | 2320 | */ | 
|  | 2321 | disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits; | 
|  | 2322 | if (disksize > i_size_read(mpd->inode)) | 
|  | 2323 | disksize = i_size_read(mpd->inode); | 
|  | 2324 | if (disksize > EXT4_I(mpd->inode)->i_disksize) { | 
|  | 2325 | ext4_update_i_disksize(mpd->inode, disksize); | 
|  | 2326 | return ext4_mark_inode_dirty(handle, mpd->inode); | 
|  | 2327 | } | 
|  | 2328 |  | 
| Aneesh Kumar K.V | c4a0c46 | 2008-08-19 21:08:18 -0400 | [diff] [blame] | 2329 | return 0; | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2330 | } | 
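The tail of mpage_da_map_blocks() computes the new on-disk size: the byte offset just past the allocated range, clamped to the in-core i_size, and written back only when it grows i_disksize. A small arithmetic sketch of that computation in plain user-space C (names are illustrative):

#include <stdio.h>

/* candidate on-disk size after allocating `blks` blocks at logical block
 * `next`, clamped to i_size and never shrinking the current i_disksize */
static long long new_disksize(long long next, long long blks, int blkbits,
			      long long i_size, long long i_disksize)
{
	long long disksize = (next + blks) << blkbits;

	if (disksize > i_size)
		disksize = i_size;	/* never advertise blocks past i_size */
	return disksize > i_disksize ? disksize : i_disksize;
}

int main(void)
{
	/* 4K blocks (blkbits = 12): blocks 10..13 allocated, file is 60000 bytes */
	printf("%lld\n", new_disksize(10, 4, 12, 60000, 40960));	/* prints 57344 */
	return 0;
}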
|  | 2331 |  | 
| Aneesh Kumar K.V | bf068ee | 2008-08-19 22:16:43 -0400 | [diff] [blame] | 2332 | #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \ | 
|  | 2333 | (1 << BH_Delay) | (1 << BH_Unwritten)) | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2334 |  | 
|  | 2335 | /* | 
|  | 2336 | * mpage_add_bh_to_extent - try to add one more block to extent of blocks | 
|  | 2337 | * | 
|  | 2338 | * @mpd->lbh - extent of blocks | 
|  | 2339 | * @logical - logical number of the block in the file | 
|  | 2340 | * @bh - bh of the block (used to access block's state) | 
|  | 2341 | * | 
|  | 2342 | * the function is used to collect contiguous blocks in the same state | 
|  | 2343 | */ | 
|  | 2344 | static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, | 
| Theodore Ts'o | 8dc207c | 2009-02-23 06:46:01 -0500 | [diff] [blame] | 2345 | sector_t logical, size_t b_size, | 
|  | 2346 | unsigned long b_state) | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2347 | { | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2348 | sector_t next; | 
| Theodore Ts'o | 8dc207c | 2009-02-23 06:46:01 -0500 | [diff] [blame] | 2349 | int nrblocks = mpd->b_size >> mpd->inode->i_blkbits; | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2350 |  | 
| Mingming Cao | 525f4ed | 2008-08-19 22:15:58 -0400 | [diff] [blame] | 2351 | /* check if the reserved journal credits might overflow */ | 
|  | 2352 | if (!(EXT4_I(mpd->inode)->i_flags & EXT4_EXTENTS_FL)) { | 
|  | 2353 | if (nrblocks >= EXT4_MAX_TRANS_DATA) { | 
|  | 2354 | /* | 
|  | 2355 | * With non-extent format we are limited by the journal | 
|  | 2356 | * credit available.  Total credit needed to insert | 
|  | 2357 | * nrblocks contiguous blocks is dependent on the | 
|  | 2358 | * nrblocks.  So limit nrblocks. | 
|  | 2359 | */ | 
|  | 2360 | goto flush_it; | 
|  | 2361 | } else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) > | 
|  | 2362 | EXT4_MAX_TRANS_DATA) { | 
|  | 2363 | /* | 
|  | 2364 | * Adding the new buffer_head would make it cross the | 
|  | 2365 | * allowed limit for which we have journal credit | 
|  | 2366 | * reserved. So limit the new bh->b_size | 
|  | 2367 | */ | 
|  | 2368 | b_size = (EXT4_MAX_TRANS_DATA - nrblocks) << | 
|  | 2369 | mpd->inode->i_blkbits; | 
|  | 2370 | /* we will do mpage_da_submit_io in the next loop */ | 
|  | 2371 | } | 
|  | 2372 | } | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2373 | /* | 
|  | 2374 | * First block in the extent | 
|  | 2375 | */ | 
| Theodore Ts'o | 8dc207c | 2009-02-23 06:46:01 -0500 | [diff] [blame] | 2376 | if (mpd->b_size == 0) { | 
|  | 2377 | mpd->b_blocknr = logical; | 
|  | 2378 | mpd->b_size = b_size; | 
|  | 2379 | mpd->b_state = b_state & BH_FLAGS; | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2380 | return; | 
|  | 2381 | } | 
|  | 2382 |  | 
| Theodore Ts'o | 8dc207c | 2009-02-23 06:46:01 -0500 | [diff] [blame] | 2383 | next = mpd->b_blocknr + nrblocks; | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2384 | /* | 
|  | 2385 | * Can we merge the block to our big extent? | 
|  | 2386 | */ | 
| Theodore Ts'o | 8dc207c | 2009-02-23 06:46:01 -0500 | [diff] [blame] | 2387 | if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) { | 
|  | 2388 | mpd->b_size += b_size; | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2389 | return; | 
|  | 2390 | } | 
|  | 2391 |  | 
| Mingming Cao | 525f4ed | 2008-08-19 22:15:58 -0400 | [diff] [blame] | 2392 | flush_it: | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2393 | /* | 
|  | 2394 | * We couldn't merge the block to our extent, so we | 
|  | 2395 | * need to flush the current extent and start a new one | 
|  | 2396 | */ | 
| Aneesh Kumar K.V | c4a0c46 | 2008-08-19 21:08:18 -0400 | [diff] [blame] | 2397 | if (mpage_da_map_blocks(mpd) == 0) | 
|  | 2398 | mpage_da_submit_io(mpd); | 
| Aneesh Kumar K.V | a1d6cc5 | 2008-08-19 21:55:02 -0400 | [diff] [blame] | 2399 | mpd->io_done = 1; | 
|  | 2400 | return; | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2401 | } | 
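mpage_add_bh_to_extent() grows a single in-flight extent: a block is merged only when it is logically contiguous with what has been accumulated and carries the same state bits; otherwise the current extent is mapped and submitted and a new one is started. The sketch below models just that merge decision in user space; the extent struct and the printf "flush" are placeholders for mpd and the map/submit calls.

#include <stdio.h>

struct extent {
	unsigned long start;
	unsigned long nrblocks;
	unsigned long state;
};

static void flush(struct extent *e)
{
	if (e->nrblocks)
		printf("flush extent: start=%lu len=%lu state=%#lx\n",
		       e->start, e->nrblocks, e->state);
	e->nrblocks = 0;
}

static void add_block(struct extent *e, unsigned long logical, unsigned long state)
{
	if (e->nrblocks == 0) {
		e->start = logical;	/* first block in the extent */
		e->nrblocks = 1;
		e->state = state;
		return;
	}
	if (logical == e->start + e->nrblocks && state == e->state) {
		e->nrblocks++;		/* contiguous and same state: merge */
		return;
	}
	flush(e);			/* cannot merge: flush, then restart */
	e->start = logical;
	e->nrblocks = 1;
	e->state = state;
}

int main(void)
{
	struct extent e = {0, 0, 0};
	unsigned long blocks[] = {8, 9, 10, 12, 13};
	int i;

	for (i = 0; i < 5; i++)
		add_block(&e, blocks[i], 0x1);
	flush(&e);			/* prints two extents: 8..10 and 12..13 */
	return 0;
}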
|  | 2402 |  | 
| Aneesh Kumar K.V | c364b22 | 2009-06-14 17:57:10 -0400 | [diff] [blame] | 2403 | static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh) | 
| Aneesh Kumar K.V | 29fa89d | 2009-05-12 16:30:27 -0400 | [diff] [blame] | 2404 | { | 
| Aneesh Kumar K.V | c364b22 | 2009-06-14 17:57:10 -0400 | [diff] [blame] | 2405 | return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh); | 
| Aneesh Kumar K.V | 29fa89d | 2009-05-12 16:30:27 -0400 | [diff] [blame] | 2406 | } | 
|  | 2407 |  | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2408 | /* | 
|  | 2409 | * __mpage_da_writepage - finds extent of pages and blocks | 
|  | 2410 | * | 
|  | 2411 | * @page: page to consider | 
|  | 2412 | * @wbc: not used, we just follow rules | 
|  | 2413 | * @data: context | 
|  | 2414 | * | 
|  | 2415 | * The function finds extents of pages and scans them for all blocks. | 
|  | 2416 | */ | 
|  | 2417 | static int __mpage_da_writepage(struct page *page, | 
|  | 2418 | struct writeback_control *wbc, void *data) | 
|  | 2419 | { | 
|  | 2420 | struct mpage_da_data *mpd = data; | 
|  | 2421 | struct inode *inode = mpd->inode; | 
| Theodore Ts'o | 8dc207c | 2009-02-23 06:46:01 -0500 | [diff] [blame] | 2422 | struct buffer_head *bh, *head; | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2423 | sector_t logical; | 
|  | 2424 |  | 
| Aneesh Kumar K.V | a1d6cc5 | 2008-08-19 21:55:02 -0400 | [diff] [blame] | 2425 | if (mpd->io_done) { | 
|  | 2426 | /* | 
|  | 2427 | * Redirty the rest of the pages in the page_vec | 
|  | 2428 | * and skip them. We will | 
| Anand Gadiyar | fd589a8 | 2009-07-16 17:13:03 +0200 | [diff] [blame] | 2429 | * try to write them again after | 
| Aneesh Kumar K.V | a1d6cc5 | 2008-08-19 21:55:02 -0400 | [diff] [blame] | 2430 | * starting a new transaction | 
|  | 2431 | */ | 
|  | 2432 | redirty_page_for_writepage(wbc, page); | 
|  | 2433 | unlock_page(page); | 
|  | 2434 | return MPAGE_DA_EXTENT_TAIL; | 
|  | 2435 | } | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2436 | /* | 
|  | 2437 | * Can we merge this page to current extent? | 
|  | 2438 | */ | 
|  | 2439 | if (mpd->next_page != page->index) { | 
|  | 2440 | /* | 
|  | 2441 | * Nope, we can't. So, we map non-allocated blocks | 
| Aneesh Kumar K.V | a1d6cc5 | 2008-08-19 21:55:02 -0400 | [diff] [blame] | 2442 | * and start IO on them using writepage() | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2443 | */ | 
|  | 2444 | if (mpd->next_page != mpd->first_page) { | 
| Aneesh Kumar K.V | c4a0c46 | 2008-08-19 21:08:18 -0400 | [diff] [blame] | 2445 | if (mpage_da_map_blocks(mpd) == 0) | 
|  | 2446 | mpage_da_submit_io(mpd); | 
| Aneesh Kumar K.V | a1d6cc5 | 2008-08-19 21:55:02 -0400 | [diff] [blame] | 2447 | /* | 
|  | 2448 | * skip the rest of the pages in the page_vec | 
|  | 2449 | */ | 
|  | 2450 | mpd->io_done = 1; | 
|  | 2451 | redirty_page_for_writepage(wbc, page); | 
|  | 2452 | unlock_page(page); | 
|  | 2453 | return MPAGE_DA_EXTENT_TAIL; | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2454 | } | 
|  | 2455 |  | 
|  | 2456 | /* | 
|  | 2457 | * Start next extent of pages ... | 
|  | 2458 | */ | 
|  | 2459 | mpd->first_page = page->index; | 
|  | 2460 |  | 
|  | 2461 | /* | 
|  | 2462 | * ... and blocks | 
|  | 2463 | */ | 
| Theodore Ts'o | 8dc207c | 2009-02-23 06:46:01 -0500 | [diff] [blame] | 2464 | mpd->b_size = 0; | 
|  | 2465 | mpd->b_state = 0; | 
|  | 2466 | mpd->b_blocknr = 0; | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2467 | } | 
|  | 2468 |  | 
|  | 2469 | mpd->next_page = page->index + 1; | 
|  | 2470 | logical = (sector_t) page->index << | 
|  | 2471 | (PAGE_CACHE_SHIFT - inode->i_blkbits); | 
|  | 2472 |  | 
|  | 2473 | if (!page_has_buffers(page)) { | 
| Theodore Ts'o | 8dc207c | 2009-02-23 06:46:01 -0500 | [diff] [blame] | 2474 | mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE, | 
|  | 2475 | (1 << BH_Dirty) | (1 << BH_Uptodate)); | 
| Aneesh Kumar K.V | a1d6cc5 | 2008-08-19 21:55:02 -0400 | [diff] [blame] | 2476 | if (mpd->io_done) | 
|  | 2477 | return MPAGE_DA_EXTENT_TAIL; | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2478 | } else { | 
|  | 2479 | /* | 
|  | 2480 | * Page with regular buffer heads, just add all dirty ones | 
|  | 2481 | */ | 
|  | 2482 | head = page_buffers(page); | 
|  | 2483 | bh = head; | 
|  | 2484 | do { | 
|  | 2485 | BUG_ON(buffer_locked(bh)); | 
| Aneesh Kumar K.V | 791b7f0 | 2009-01-05 21:50:43 -0500 | [diff] [blame] | 2486 | /* | 
|  | 2487 | * We need to try to allocate | 
|  | 2488 | * unmapped blocks in the same page. | 
|  | 2489 | * Otherwise we won't make progress | 
| Aneesh Kumar K.V | 43ce1d2 | 2009-06-14 17:58:45 -0400 | [diff] [blame] | 2490 | * with the page in ext4_writepage | 
| Aneesh Kumar K.V | 791b7f0 | 2009-01-05 21:50:43 -0500 | [diff] [blame] | 2491 | */ | 
| Aneesh Kumar K.V | c364b22 | 2009-06-14 17:57:10 -0400 | [diff] [blame] | 2492 | if (ext4_bh_delay_or_unwritten(NULL, bh)) { | 
| Theodore Ts'o | 8dc207c | 2009-02-23 06:46:01 -0500 | [diff] [blame] | 2493 | mpage_add_bh_to_extent(mpd, logical, | 
|  | 2494 | bh->b_size, | 
|  | 2495 | bh->b_state); | 
| Aneesh Kumar K.V | a1d6cc5 | 2008-08-19 21:55:02 -0400 | [diff] [blame] | 2496 | if (mpd->io_done) | 
|  | 2497 | return MPAGE_DA_EXTENT_TAIL; | 
| Aneesh Kumar K.V | 791b7f0 | 2009-01-05 21:50:43 -0500 | [diff] [blame] | 2498 | } else if (buffer_dirty(bh) && (buffer_mapped(bh))) { | 
|  | 2499 | /* | 
|  | 2500 | * mapped dirty buffer. We need to update | 
|  | 2501 | * the b_state because we look at | 
|  | 2502 | * b_state in mpage_da_map_blocks. We don't | 
|  | 2503 | * update b_size because if we find an | 
|  | 2504 | * unmapped buffer_head later we need to | 
|  | 2505 | * use the b_state flag of that buffer_head. | 
|  | 2506 | */ | 
| Theodore Ts'o | 8dc207c | 2009-02-23 06:46:01 -0500 | [diff] [blame] | 2507 | if (mpd->b_size == 0) | 
|  | 2508 | mpd->b_state = bh->b_state & BH_FLAGS; | 
| Aneesh Kumar K.V | a1d6cc5 | 2008-08-19 21:55:02 -0400 | [diff] [blame] | 2509 | } | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2510 | logical++; | 
|  | 2511 | } while ((bh = bh->b_this_page) != head); | 
|  | 2512 | } | 
|  | 2513 |  | 
|  | 2514 | return 0; | 
|  | 2515 | } | 
|  | 2516 |  | 
|  | 2517 | /* | 
| Theodore Ts'o | b920c75 | 2009-05-14 00:54:29 -0400 | [diff] [blame] | 2518 | * This is a special get_blocks_t callback which is used by | 
|  | 2519 | * ext4_da_write_begin().  It will either return a mapped block or | 
|  | 2520 | * reserve space for a single block. | 
| Aneesh Kumar K.V | 29fa89d | 2009-05-12 16:30:27 -0400 | [diff] [blame] | 2521 | * | 
|  | 2522 | * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set. | 
|  | 2523 | * We also have b_blocknr = -1 and b_bdev initialized properly | 
|  | 2524 | * | 
|  | 2525 | * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set. | 
|  | 2526 | * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev | 
|  | 2527 | * initialized properly. | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2528 | */ | 
|  | 2529 | static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, | 
|  | 2530 | struct buffer_head *bh_result, int create) | 
|  | 2531 | { | 
|  | 2532 | int ret = 0; | 
| Aneesh Kumar K.V | 33b9817 | 2009-05-12 14:40:37 -0400 | [diff] [blame] | 2533 | sector_t invalid_block = ~((sector_t) 0xffff); | 
|  | 2534 |  | 
|  | 2535 | if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es)) | 
|  | 2536 | invalid_block = ~0; | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2537 |  | 
|  | 2538 | BUG_ON(create == 0); | 
|  | 2539 | BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize); | 
|  | 2540 |  | 
|  | 2541 | /* | 
|  | 2542 | * first, we need to know whether the block is already allocated; | 
|  | 2543 | * preallocated blocks are unmapped but should be treated | 
|  | 2544 | * the same as allocated blocks. | 
|  | 2545 | */ | 
| Theodore Ts'o | c217705 | 2009-05-14 00:58:52 -0400 | [diff] [blame] | 2546 | ret = ext4_get_blocks(NULL, inode, iblock, 1,  bh_result, 0); | 
| Mingming Cao | d2a1763 | 2008-07-14 17:52:37 -0400 | [diff] [blame] | 2547 | if ((ret == 0) && !buffer_delay(bh_result)) { | 
|  | 2548 | /* the block isn't (pre)allocated yet, let's reserve space */ | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2549 | /* | 
|  | 2550 | * XXX: __block_prepare_write() unmaps passed block, | 
|  | 2551 | * is it OK? | 
|  | 2552 | */ | 
| Theodore Ts'o | 9d0be50 | 2010-01-01 02:41:30 -0500 | [diff] [blame] | 2553 | ret = ext4_da_reserve_space(inode, iblock); | 
| Mingming Cao | d2a1763 | 2008-07-14 17:52:37 -0400 | [diff] [blame] | 2554 | if (ret) | 
|  | 2555 | /* not enough space to reserve */ | 
|  | 2556 | return ret; | 
|  | 2557 |  | 
| Aneesh Kumar K.V | 33b9817 | 2009-05-12 14:40:37 -0400 | [diff] [blame] | 2558 | map_bh(bh_result, inode->i_sb, invalid_block); | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2559 | set_buffer_new(bh_result); | 
|  | 2560 | set_buffer_delay(bh_result); | 
|  | 2561 | } else if (ret > 0) { | 
|  | 2562 | bh_result->b_size = (ret << inode->i_blkbits); | 
| Aneesh Kumar K.V | 29fa89d | 2009-05-12 16:30:27 -0400 | [diff] [blame] | 2563 | if (buffer_unwritten(bh_result)) { | 
|  | 2564 | /* A delayed write to an unwritten bh should | 
|  | 2565 | * be marked new and mapped.  Mapped ensures | 
|  | 2566 | * that we don't do get_block multiple times | 
|  | 2567 | * when we write to the same offset and new | 
|  | 2568 | * ensures that we do proper zero out for | 
|  | 2569 | * partial write. | 
|  | 2570 | */ | 
| Aneesh Kumar K.V | 9c1ee18 | 2009-05-13 18:36:58 -0400 | [diff] [blame] | 2571 | set_buffer_new(bh_result); | 
| Aneesh Kumar K.V | 29fa89d | 2009-05-12 16:30:27 -0400 | [diff] [blame] | 2572 | set_buffer_mapped(bh_result); | 
|  | 2573 | } | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2574 | ret = 0; | 
|  | 2575 | } | 
|  | 2576 |  | 
|  | 2577 | return ret; | 
|  | 2578 | } | 
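The comment above ext4_da_get_block_prep() lists three outcomes handed back to write_begin: a not-yet-allocated block becomes mapped+new+delayed with a sentinel block number, a preallocated (unwritten) block becomes mapped+new so later writes reuse it and partial writes get zeroed, and an ordinary allocated block is simply reported as mapped. A rough user-space model of those three outcomes follows; the flag values, struct, and lookup result are invented for illustration.

#include <stdio.h>

#define X_NEW		0x1
#define X_MAPPED	0x2
#define X_DELAY		0x4

struct fake_bh {
	unsigned long blocknr;
	unsigned int state;
};

enum lookup { NOT_ALLOCATED, UNWRITTEN, ALREADY_MAPPED };

static void da_get_block(struct fake_bh *bh, enum lookup found, unsigned long pblk)
{
	switch (found) {
	case NOT_ALLOCATED:
		/* a real implementation would reserve space here first */
		bh->blocknr = ~0UL;			/* sentinel: no block yet */
		bh->state = X_MAPPED | X_NEW | X_DELAY;
		break;
	case UNWRITTEN:
		bh->blocknr = pblk;			/* preallocated block */
		bh->state = X_MAPPED | X_NEW;		/* new => zero out partial writes */
		break;
	case ALREADY_MAPPED:
		bh->blocknr = pblk;
		bh->state = X_MAPPED;
		break;
	}
}

int main(void)
{
	struct fake_bh bh;

	da_get_block(&bh, UNWRITTEN, 1234);
	printf("blocknr=%lu state=%#x\n", bh.blocknr, bh.state);
	return 0;
}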
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2579 |  | 
| Theodore Ts'o | b920c75 | 2009-05-14 00:54:29 -0400 | [diff] [blame] | 2580 | /* | 
|  | 2581 | * This function is used as a standard get_block_t callback function | 
|  | 2582 | * when there is no desire to allocate any blocks.  It is used as a | 
|  | 2583 | * callback function for block_prepare_write(), nobh_writepage(), and | 
|  | 2584 | * block_write_full_page().  These functions should only try to map a | 
|  | 2585 | * single block at a time. | 
|  | 2586 | * | 
|  | 2587 | * Since this function doesn't do block allocations even if the caller | 
|  | 2588 | * requests it by passing in create=1, it is critically important that | 
|  | 2589 | * any caller checks to make sure that any buffer heads returned | 
|  | 2590 | * by this function are either all already mapped or marked for | 
|  | 2591 | * delayed allocation before calling nobh_writepage() or | 
|  | 2592 | * block_write_full_page().  Otherwise, b_blocknr could be left | 
|  | 2593 | * uninitialized, and the page write functions will be taken by | 
|  | 2594 | * surprise. | 
|  | 2595 | */ | 
|  | 2596 | static int noalloc_get_block_write(struct inode *inode, sector_t iblock, | 
| Aneesh Kumar K.V | f0e6c98 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2597 | struct buffer_head *bh_result, int create) | 
|  | 2598 | { | 
|  | 2599 | int ret = 0; | 
|  | 2600 | unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; | 
|  | 2601 |  | 
| Theodore Ts'o | a2dc52b | 2009-05-12 13:51:29 -0400 | [diff] [blame] | 2602 | BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize); | 
|  | 2603 |  | 
| Aneesh Kumar K.V | f0e6c98 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2604 | /* | 
|  | 2605 | * we don't want to do block allocation in writepage | 
|  | 2606 | * so call ext4_get_blocks() with create = 0 | 
|  | 2607 | */ | 
| Theodore Ts'o | c217705 | 2009-05-14 00:58:52 -0400 | [diff] [blame] | 2608 | ret = ext4_get_blocks(NULL, inode, iblock, max_blocks, bh_result, 0); | 
| Aneesh Kumar K.V | f0e6c98 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2609 | if (ret > 0) { | 
|  | 2610 | bh_result->b_size = (ret << inode->i_blkbits); | 
|  | 2611 | ret = 0; | 
|  | 2612 | } | 
|  | 2613 | return ret; | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2614 | } | 
|  | 2615 |  | 
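|  |  | /* | 
|  |  | * bget_one() and bput_one() are trivial walk_page_buffers() callbacks used | 
|  |  | * below to take and drop a reference on each buffer_head, so the buffers | 
|  |  | * stay pinned while __ext4_journalled_writepage() has the page unlocked. | 
|  |  | */ | 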
| Aneesh Kumar K.V | 62e086b | 2009-06-14 17:59:34 -0400 | [diff] [blame] | 2616 | static int bget_one(handle_t *handle, struct buffer_head *bh) | 
|  | 2617 | { | 
|  | 2618 | get_bh(bh); | 
|  | 2619 | return 0; | 
|  | 2620 | } | 
|  | 2621 |  | 
|  | 2622 | static int bput_one(handle_t *handle, struct buffer_head *bh) | 
|  | 2623 | { | 
|  | 2624 | put_bh(bh); | 
|  | 2625 | return 0; | 
|  | 2626 | } | 
|  | 2627 |  | 
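|  |  | /* | 
|  |  | * Journal a full page of dirty buffers: pin the page's buffers, unlock the | 
|  |  | * page, start a handle sized by ext4_writepage_trans_blocks(), get write | 
|  |  | * access to each buffer and journal it, then set EXT4_STATE_JDATA on the | 
|  |  | * inode (see ext4_bmap() below, which flushes the journal when that state | 
|  |  | * is set). | 
|  |  | */ | 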
|  | 2628 | static int __ext4_journalled_writepage(struct page *page, | 
| Aneesh Kumar K.V | 62e086b | 2009-06-14 17:59:34 -0400 | [diff] [blame] | 2629 | unsigned int len) | 
|  | 2630 | { | 
|  | 2631 | struct address_space *mapping = page->mapping; | 
|  | 2632 | struct inode *inode = mapping->host; | 
|  | 2633 | struct buffer_head *page_bufs; | 
|  | 2634 | handle_t *handle = NULL; | 
|  | 2635 | int ret = 0; | 
|  | 2636 | int err; | 
|  | 2637 |  | 
|  | 2638 | page_bufs = page_buffers(page); | 
|  | 2639 | BUG_ON(!page_bufs); | 
|  | 2640 | walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one); | 
|  | 2641 | /* As soon as we unlock the page, it can go away, but we have | 
|  | 2642 | * references to buffers so we are safe */ | 
|  | 2643 | unlock_page(page); | 
|  | 2644 |  | 
|  | 2645 | handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode)); | 
|  | 2646 | if (IS_ERR(handle)) { | 
|  | 2647 | ret = PTR_ERR(handle); | 
|  | 2648 | goto out; | 
|  | 2649 | } | 
|  | 2650 |  | 
|  | 2651 | ret = walk_page_buffers(handle, page_bufs, 0, len, NULL, | 
|  | 2652 | do_journal_get_write_access); | 
|  | 2653 |  | 
|  | 2654 | err = walk_page_buffers(handle, page_bufs, 0, len, NULL, | 
|  | 2655 | write_end_fn); | 
|  | 2656 | if (ret == 0) | 
|  | 2657 | ret = err; | 
|  | 2658 | err = ext4_journal_stop(handle); | 
|  | 2659 | if (!ret) | 
|  | 2660 | ret = err; | 
|  | 2661 |  | 
|  | 2662 | walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one); | 
| Theodore Ts'o | 19f5fb7 | 2010-01-24 14:34:07 -0500 | [diff] [blame] | 2663 | ext4_set_inode_state(inode, EXT4_STATE_JDATA); | 
| Aneesh Kumar K.V | 62e086b | 2009-06-14 17:59:34 -0400 | [diff] [blame] | 2664 | out: | 
|  | 2665 | return ret; | 
|  | 2666 | } | 
|  | 2667 |  | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 2668 | static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode); | 
|  | 2669 | static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate); | 
|  | 2670 |  | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2671 | /* | 
| Aneesh Kumar K.V | 43ce1d2 | 2009-06-14 17:58:45 -0400 | [diff] [blame] | 2672 | * Note that we don't need to start a transaction unless we're journaling data | 
|  | 2673 | * because we should have holes filled from ext4_page_mkwrite(). We even don't | 
|  | 2674 | * need to file the inode to the transaction's list in ordered mode because if | 
|  | 2675 | * we are writing back data added by write(), the inode is already there and if | 
|  | 2676 | * we are writing back data modified via mmap(), no one guarantees in which | 
|  | 2677 | * transaction the data will hit the disk. In case we are journaling data, we | 
|  | 2678 | * cannot start transaction directly because transaction start ranks above page | 
|  | 2679 | * lock so we have to do some magic. | 
|  | 2680 | * | 
| Theodore Ts'o | b920c75 | 2009-05-14 00:54:29 -0400 | [diff] [blame] | 2681 | * This function can get called via... | 
|  | 2682 | *   - ext4_da_writepages after taking page lock (have journal handle) | 
|  | 2683 | *   - journal_submit_inode_data_buffers (no journal handle) | 
|  | 2684 | *   - shrink_page_list via pdflush (no journal handle) | 
|  | 2685 | *   - grab_page_cache when doing write_begin (have journal handle) | 
| Aneesh Kumar K.V | 43ce1d2 | 2009-06-14 17:58:45 -0400 | [diff] [blame] | 2686 | * | 
|  | 2687 | * We don't do any block allocation in this function. If we have a page with | 
|  | 2688 | * multiple blocks we need to write those buffer_heads that are mapped. This | 
|  | 2689 | * is important for mmapped writes. So if we do, with a 1K blocksize, | 
|  | 2690 | * truncate(f, 1024); | 
|  | 2691 | * a = mmap(f, 0, 4096); | 
|  | 2692 | * a[0] = 'a'; | 
|  | 2693 | * truncate(f, 4096); | 
|  | 2694 | * then the first buffer_head in the page is mapped via the page_mkwrite | 
|  | 2695 | * callback, but the other buffer_heads would be unmapped but dirty (dirtied | 
|  | 2696 | * via do_wp_page). So writepage should write the first block. If we modify | 
|  | 2697 | * the mmap area beyond 1024 we will again get a page fault and the | 
|  | 2698 | * page_mkwrite callback will do the block allocation and mark the | 
|  | 2699 | * buffer_heads mapped. | 
|  | 2700 | * | 
|  | 2701 | * We redirty the page if any buffer_heads in the page are either delayed | 
|  | 2702 | * or unwritten. | 
|  | 2703 | * | 
|  | 2704 | * We can get recursively called as shown below. | 
|  | 2705 | * | 
|  | 2706 | *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() -> | 
|  | 2707 | *		ext4_writepage() | 
|  | 2708 | * | 
|  | 2709 | * But since we don't do any block allocation we should not deadlock. | 
|  | 2710 | * The page also has the dirty flag cleared, so we don't get a recursive page_lock. | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2711 | */ | 
| Aneesh Kumar K.V | 43ce1d2 | 2009-06-14 17:58:45 -0400 | [diff] [blame] | 2712 | static int ext4_writepage(struct page *page, | 
| Aneesh Kumar K.V | 62e086b | 2009-06-14 17:59:34 -0400 | [diff] [blame] | 2713 | struct writeback_control *wbc) | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2714 | { | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2715 | int ret = 0; | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2716 | loff_t size; | 
| Theodore Ts'o | 498e5f2 | 2008-11-05 00:14:04 -0500 | [diff] [blame] | 2717 | unsigned int len; | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 2718 | struct buffer_head *page_bufs = NULL; | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2719 | struct inode *inode = page->mapping->host; | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2720 |  | 
| Aneesh Kumar K.V | 43ce1d2 | 2009-06-14 17:58:45 -0400 | [diff] [blame] | 2721 | trace_ext4_writepage(inode, page); | 
| Aneesh Kumar K.V | f0e6c98 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2722 | size = i_size_read(inode); | 
|  | 2723 | if (page->index == size >> PAGE_CACHE_SHIFT) | 
|  | 2724 | len = size & ~PAGE_CACHE_MASK; | 
|  | 2725 | else | 
|  | 2726 | len = PAGE_CACHE_SIZE; | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2727 |  | 
| Aneesh Kumar K.V | f0e6c98 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2728 | if (page_has_buffers(page)) { | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2729 | page_bufs = page_buffers(page); | 
| Aneesh Kumar K.V | f0e6c98 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2730 | if (walk_page_buffers(NULL, page_bufs, 0, len, NULL, | 
| Aneesh Kumar K.V | c364b22 | 2009-06-14 17:57:10 -0400 | [diff] [blame] | 2731 | ext4_bh_delay_or_unwritten)) { | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2732 | /* | 
| Aneesh Kumar K.V | f0e6c98 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2733 | * We don't want to do block allocation, | 
|  | 2734 | * so redirty the page and return. | 
| Aneesh Kumar K.V | cd1aac3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2735 | * We may reach here when we do a journal commit | 
|  | 2736 | * via journal_submit_inode_data_buffers. | 
|  | 2737 | * If we don't have a mapping block we just ignore | 
| Aneesh Kumar K.V | f0e6c98 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2738 | * them. We can also reach here via shrink_page_list. | 
|  | 2739 | */ | 
|  | 2740 | redirty_page_for_writepage(wbc, page); | 
|  | 2741 | unlock_page(page); | 
|  | 2742 | return 0; | 
|  | 2743 | } | 
|  | 2744 | } else { | 
|  | 2745 | /* | 
|  | 2746 | * The test for page_has_buffers() is subtle: | 
|  | 2747 | * We know the page is dirty but it lost buffers. That means | 
|  | 2748 | * that at some moment in time after write_begin()/write_end() | 
|  | 2749 | * has been called all buffers have been clean and thus they | 
|  | 2750 | * must have been written at least once. So they are all | 
|  | 2751 | * mapped and we can happily proceed with mapping them | 
|  | 2752 | * and writing the page. | 
|  | 2753 | * | 
|  | 2754 | * Try to initialize the buffer_heads and check whether | 
|  | 2755 | * all are mapped and not delayed. We don't want to | 
|  | 2756 | * do block allocation here. | 
|  | 2757 | */ | 
| Aneesh Kumar K.V | b767e78 | 2009-06-04 08:06:06 -0400 | [diff] [blame] | 2758 | ret = block_prepare_write(page, 0, len, | 
| Theodore Ts'o | b920c75 | 2009-05-14 00:54:29 -0400 | [diff] [blame] | 2759 | noalloc_get_block_write); | 
| Aneesh Kumar K.V | f0e6c98 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2760 | if (!ret) { | 
|  | 2761 | page_bufs = page_buffers(page); | 
|  | 2762 | /* check whether all are mapped and not delayed */ | 
|  | 2763 | if (walk_page_buffers(NULL, page_bufs, 0, len, NULL, | 
| Aneesh Kumar K.V | c364b22 | 2009-06-14 17:57:10 -0400 | [diff] [blame] | 2764 | ext4_bh_delay_or_unwritten)) { | 
| Aneesh Kumar K.V | f0e6c98 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2765 | redirty_page_for_writepage(wbc, page); | 
|  | 2766 | unlock_page(page); | 
|  | 2767 | return 0; | 
|  | 2768 | } | 
|  | 2769 | } else { | 
|  | 2770 | /* | 
|  | 2771 | * We can't do block allocation here | 
|  | 2772 | * so just redirty the page, unlock it, | 
|  | 2773 | * and return | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2774 | */ | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2775 | redirty_page_for_writepage(wbc, page); | 
|  | 2776 | unlock_page(page); | 
|  | 2777 | return 0; | 
|  | 2778 | } | 
| Aneesh Kumar K.V | ed9b3e3 | 2008-11-07 09:06:45 -0500 | [diff] [blame] | 2779 | /* now mark the buffer_heads as dirty and uptodate */ | 
| Aneesh Kumar K.V | b767e78 | 2009-06-04 08:06:06 -0400 | [diff] [blame] | 2780 | block_commit_write(page, 0, len); | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2781 | } | 
|  | 2782 |  | 
| Aneesh Kumar K.V | 43ce1d2 | 2009-06-14 17:58:45 -0400 | [diff] [blame] | 2783 | if (PageChecked(page) && ext4_should_journal_data(inode)) { | 
|  | 2784 | /* | 
|  | 2785 | * It's mmapped pagecache.  Add buffers and journal it.  There | 
|  | 2786 | * doesn't seem much point in redirtying the page here. | 
|  | 2787 | */ | 
|  | 2788 | ClearPageChecked(page); | 
| Wu Fengguang | 3f0ca30 | 2009-11-24 11:15:44 -0500 | [diff] [blame] | 2789 | return __ext4_journalled_writepage(page, len); | 
| Aneesh Kumar K.V | 43ce1d2 | 2009-06-14 17:58:45 -0400 | [diff] [blame] | 2790 | } | 
|  | 2791 |  | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2792 | if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode)) | 
| Theodore Ts'o | b920c75 | 2009-05-14 00:54:29 -0400 | [diff] [blame] | 2793 | ret = nobh_writepage(page, noalloc_get_block_write, wbc); | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 2794 | else if (page_bufs && buffer_uninit(page_bufs)) { | 
|  | 2795 | ext4_set_bh_endio(page_bufs, inode); | 
|  | 2796 | ret = block_write_full_page_endio(page, noalloc_get_block_write, | 
|  | 2797 | wbc, ext4_end_io_buffer_write); | 
|  | 2798 | } else | 
| Theodore Ts'o | b920c75 | 2009-05-14 00:54:29 -0400 | [diff] [blame] | 2799 | ret = block_write_full_page(page, noalloc_get_block_write, | 
|  | 2800 | wbc); | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2801 |  | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2802 | return ret; | 
|  | 2803 | } | 
|  | 2804 |  | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2805 | /* | 
| Mingming Cao | 525f4ed | 2008-08-19 22:15:58 -0400 | [diff] [blame] | 2806 | * This is called via ext4_da_writepages() to | 
|  | 2807 | * calculate the total number of credits to reserve to fit | 
|  | 2808 | * a single extent allocation into a single transaction; | 
|  | 2809 | * ext4_da_writepages() will loop calling this before | 
|  | 2810 | * the block allocation. | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2811 | */ | 
| Mingming Cao | 525f4ed | 2008-08-19 22:15:58 -0400 | [diff] [blame] | 2812 |  | 
|  | 2813 | static int ext4_da_writepages_trans_blocks(struct inode *inode) | 
|  | 2814 | { | 
|  | 2815 | int max_blocks = EXT4_I(inode)->i_reserved_data_blocks; | 
|  | 2816 |  | 
|  | 2817 | /* | 
|  | 2818 | * With the non-extent format, the journal credits needed to | 
|  | 2819 | * insert nrblocks contiguous blocks depend on the | 
|  | 2820 | * number of contiguous blocks. So we limit the | 
|  | 2821 | * number of contiguous blocks to a sane value. | 
|  | 2822 | */ | 
| Julia Lawall | 30c6e07a | 2009-11-15 15:30:58 -0500 | [diff] [blame] | 2823 | if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) && | 
| Mingming Cao | 525f4ed | 2008-08-19 22:15:58 -0400 | [diff] [blame] | 2824 | (max_blocks > EXT4_MAX_TRANS_DATA)) | 
|  | 2825 | max_blocks = EXT4_MAX_TRANS_DATA; | 
|  | 2826 |  | 
|  | 2827 | return ext4_chunk_trans_blocks(inode, max_blocks); | 
|  | 2828 | } | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2829 |  | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2830 | static int ext4_da_writepages(struct address_space *mapping, | 
| Aneesh Kumar K.V | a1d6cc5 | 2008-08-19 21:55:02 -0400 | [diff] [blame] | 2831 | struct writeback_control *wbc) | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2832 | { | 
| Aneesh Kumar K.V | 22208de | 2008-10-16 10:10:36 -0400 | [diff] [blame] | 2833 | pgoff_t	index; | 
|  | 2834 | int range_whole = 0; | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2835 | handle_t *handle = NULL; | 
| Aneesh Kumar K.V | df22291 | 2008-09-08 23:05:34 -0400 | [diff] [blame] | 2836 | struct mpage_da_data mpd; | 
| Aneesh Kumar K.V | 5e745b0 | 2008-08-18 18:00:57 -0400 | [diff] [blame] | 2837 | struct inode *inode = mapping->host; | 
| Aneesh Kumar K.V | 22208de | 2008-10-16 10:10:36 -0400 | [diff] [blame] | 2838 | int no_nrwrite_index_update; | 
| Theodore Ts'o | 498e5f2 | 2008-11-05 00:14:04 -0500 | [diff] [blame] | 2839 | int pages_written = 0; | 
|  | 2840 | long pages_skipped; | 
| Theodore Ts'o | 55138e0 | 2009-09-29 13:31:31 -0400 | [diff] [blame] | 2841 | unsigned int max_pages; | 
| Aneesh Kumar K.V | 2acf2c2 | 2009-02-14 10:42:58 -0500 | [diff] [blame] | 2842 | int range_cyclic, cycled = 1, io_done = 0; | 
| Theodore Ts'o | 55138e0 | 2009-09-29 13:31:31 -0400 | [diff] [blame] | 2843 | int needed_blocks, ret = 0; | 
|  | 2844 | long desired_nr_to_write, nr_to_writebump = 0; | 
| Theodore Ts'o | de89de6 | 2009-08-31 17:00:59 -0400 | [diff] [blame] | 2845 | loff_t range_start = wbc->range_start; | 
| Aneesh Kumar K.V | 5e745b0 | 2008-08-18 18:00:57 -0400 | [diff] [blame] | 2846 | struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2847 |  | 
| Theodore Ts'o | 9bffad1 | 2009-06-17 11:48:11 -0400 | [diff] [blame] | 2848 | trace_ext4_da_writepages(inode, wbc); | 
| Theodore Ts'o | ba80b10 | 2009-01-03 20:03:21 -0500 | [diff] [blame] | 2849 |  | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2850 | /* | 
|  | 2851 | * No pages to write? This is mainly a kludge to avoid starting | 
|  | 2852 | * a transaction for special inodes like journal inode on last iput() | 
|  | 2853 | * because that could violate lock ordering on umount | 
|  | 2854 | */ | 
| Aneesh Kumar K.V | a1d6cc5 | 2008-08-19 21:55:02 -0400 | [diff] [blame] | 2855 | if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2856 | return 0; | 
| Theodore Ts'o | 2a21e37 | 2008-11-05 09:22:24 -0500 | [diff] [blame] | 2857 |  | 
|  | 2858 | /* | 
|  | 2859 | * If the filesystem has aborted, it is read-only, so return | 
|  | 2860 | * right away instead of dumping stack traces later on that | 
|  | 2861 | * will obscure the real source of the problem.  We test | 
| Theodore Ts'o | 4ab2f15 | 2009-06-13 10:09:36 -0400 | [diff] [blame] | 2862 | * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because | 
| Theodore Ts'o | 2a21e37 | 2008-11-05 09:22:24 -0500 | [diff] [blame] | 2863 | * the latter could be true if the filesystem is mounted | 
|  | 2864 | * read-only, and in that case, ext4_da_writepages should | 
|  | 2865 | * *never* be called, so if that ever happens, we would want | 
|  | 2866 | * the stack trace. | 
|  | 2867 | */ | 
| Theodore Ts'o | 4ab2f15 | 2009-06-13 10:09:36 -0400 | [diff] [blame] | 2868 | if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) | 
| Theodore Ts'o | 2a21e37 | 2008-11-05 09:22:24 -0500 | [diff] [blame] | 2869 | return -EROFS; | 
|  | 2870 |  | 
| Aneesh Kumar K.V | 22208de | 2008-10-16 10:10:36 -0400 | [diff] [blame] | 2871 | if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) | 
|  | 2872 | range_whole = 1; | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2873 |  | 
| Aneesh Kumar K.V | 2acf2c2 | 2009-02-14 10:42:58 -0500 | [diff] [blame] | 2874 | range_cyclic = wbc->range_cyclic; | 
|  | 2875 | if (wbc->range_cyclic) { | 
| Aneesh Kumar K.V | 22208de | 2008-10-16 10:10:36 -0400 | [diff] [blame] | 2876 | index = mapping->writeback_index; | 
| Aneesh Kumar K.V | 2acf2c2 | 2009-02-14 10:42:58 -0500 | [diff] [blame] | 2877 | if (index) | 
|  | 2878 | cycled = 0; | 
|  | 2879 | wbc->range_start = index << PAGE_CACHE_SHIFT; | 
|  | 2880 | wbc->range_end  = LLONG_MAX; | 
|  | 2881 | wbc->range_cyclic = 0; | 
|  | 2882 | } else | 
| Aneesh Kumar K.V | 22208de | 2008-10-16 10:10:36 -0400 | [diff] [blame] | 2883 | index = wbc->range_start >> PAGE_CACHE_SHIFT; | 
| Aneesh Kumar K.V | a1d6cc5 | 2008-08-19 21:55:02 -0400 | [diff] [blame] | 2884 |  | 
| Theodore Ts'o | 55138e0 | 2009-09-29 13:31:31 -0400 | [diff] [blame] | 2885 | /* | 
|  | 2886 | * This works around two forms of stupidity.  The first is in | 
|  | 2887 | * the writeback code, which caps the maximum number of pages | 
|  | 2888 | * written to be 1024 pages.  This is wrong on multiple | 
|  | 2889 | * levels; different architectures have a different page size, | 
|  | 2890 | * which changes the maximum amount of data which gets | 
|  | 2891 | * written.  Secondly, 4 megabytes is way too small.  XFS | 
|  | 2892 | * forces this value to be 16 megabytes by multiplying | 
|  | 2893 | * nr_to_write parameter by four, and then relies on its | 
|  | 2894 | * allocator to allocate larger extents to make them | 
|  | 2895 | * contiguous.  Unfortunately this brings us to the second | 
|  | 2896 | * stupidity, which is that ext4's mballoc code only allocates | 
|  | 2897 | * at most 2048 blocks.  So we force contiguous writes up to | 
|  | 2898 | * the number of dirty blocks in the inode, or | 
|  | 2899 | * sbi->s_max_writeback_mb_bump, whichever is smaller. | 
|  | 2900 | */ | 
|  | 2901 | max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT); | 
|  | 2902 | if (!range_cyclic && range_whole) | 
|  | 2903 | desired_nr_to_write = wbc->nr_to_write * 8; | 
|  | 2904 | else | 
|  | 2905 | desired_nr_to_write = ext4_num_dirty_pages(inode, index, | 
|  | 2906 | max_pages); | 
|  | 2907 | if (desired_nr_to_write > max_pages) | 
|  | 2908 | desired_nr_to_write = max_pages; | 
|  | 2909 |  | 
|  | 2910 | if (wbc->nr_to_write < desired_nr_to_write) { | 
|  | 2911 | nr_to_writebump = desired_nr_to_write - wbc->nr_to_write; | 
|  | 2912 | wbc->nr_to_write = desired_nr_to_write; | 
|  | 2913 | } | 
|  | 2914 |  | 
| Aneesh Kumar K.V | df22291 | 2008-09-08 23:05:34 -0400 | [diff] [blame] | 2915 | mpd.wbc = wbc; | 
|  | 2916 | mpd.inode = mapping->host; | 
|  | 2917 |  | 
| Aneesh Kumar K.V | 22208de | 2008-10-16 10:10:36 -0400 | [diff] [blame] | 2918 | /* | 
|  | 2919 | * we don't want write_cache_pages to update | 
|  | 2920 | * nr_to_write and writeback_index | 
|  | 2921 | */ | 
|  | 2922 | no_nrwrite_index_update = wbc->no_nrwrite_index_update; | 
|  | 2923 | wbc->no_nrwrite_index_update = 1; | 
|  | 2924 | pages_skipped = wbc->pages_skipped; | 
|  | 2925 |  | 
| Aneesh Kumar K.V | 2acf2c2 | 2009-02-14 10:42:58 -0500 | [diff] [blame] | 2926 | retry: | 
| Aneesh Kumar K.V | 22208de | 2008-10-16 10:10:36 -0400 | [diff] [blame] | 2927 | while (!ret && wbc->nr_to_write > 0) { | 
| Aneesh Kumar K.V | a1d6cc5 | 2008-08-19 21:55:02 -0400 | [diff] [blame] | 2928 |  | 
|  | 2929 | /* | 
|  | 2930 | * We insert one extent at a time, so we need | 
|  | 2931 | * the credits for a single extent allocation. | 
|  | 2932 | * Journalled mode is currently not supported | 
|  | 2933 | * by delalloc. | 
|  | 2934 | */ | 
|  | 2935 | BUG_ON(ext4_should_journal_data(inode)); | 
| Mingming Cao | 525f4ed | 2008-08-19 22:15:58 -0400 | [diff] [blame] | 2936 | needed_blocks = ext4_da_writepages_trans_blocks(inode); | 
| Aneesh Kumar K.V | a1d6cc5 | 2008-08-19 21:55:02 -0400 | [diff] [blame] | 2937 |  | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2938 | /* start a new transaction*/ | 
|  | 2939 | handle = ext4_journal_start(inode, needed_blocks); | 
|  | 2940 | if (IS_ERR(handle)) { | 
|  | 2941 | ret = PTR_ERR(handle); | 
| Theodore Ts'o | 1693918 | 2009-09-26 17:43:59 -0400 | [diff] [blame] | 2942 | ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " | 
| Aneesh Kumar K.V | a1d6cc5 | 2008-08-19 21:55:02 -0400 | [diff] [blame] | 2943 | "%ld pages, ino %lu; err %d\n", __func__, | 
|  | 2944 | wbc->nr_to_write, inode->i_ino, ret); | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2945 | goto out_writepages; | 
|  | 2946 | } | 
| Theodore Ts'o | f63e600 | 2009-02-23 16:42:39 -0500 | [diff] [blame] | 2947 |  | 
|  | 2948 | /* | 
|  | 2949 | * Now call __mpage_da_writepage to find the next | 
|  | 2950 | * contiguous region of logical blocks that need | 
|  | 2951 | * blocks to be allocated by ext4.  We don't actually | 
|  | 2952 | * submit the blocks for I/O here, even though | 
|  | 2953 | * write_cache_pages thinks it will, and will set the | 
|  | 2954 | * pages as clean for write before calling | 
|  | 2955 | * __mpage_da_writepage(). | 
|  | 2956 | */ | 
|  | 2957 | mpd.b_size = 0; | 
|  | 2958 | mpd.b_state = 0; | 
|  | 2959 | mpd.b_blocknr = 0; | 
|  | 2960 | mpd.first_page = 0; | 
|  | 2961 | mpd.next_page = 0; | 
|  | 2962 | mpd.io_done = 0; | 
|  | 2963 | mpd.pages_written = 0; | 
|  | 2964 | mpd.retval = 0; | 
|  | 2965 | ret = write_cache_pages(mapping, wbc, __mpage_da_writepage, | 
|  | 2966 | &mpd); | 
|  | 2967 | /* | 
| André Goddard Rosa | af901ca | 2009-11-14 13:09:05 -0200 | [diff] [blame] | 2968 | * If we have a contiguous extent of pages and we | 
| Theodore Ts'o | f63e600 | 2009-02-23 16:42:39 -0500 | [diff] [blame] | 2969 | * haven't done the I/O yet, map the blocks and submit | 
|  | 2970 | * them for I/O. | 
|  | 2971 | */ | 
|  | 2972 | if (!mpd.io_done && mpd.next_page != mpd.first_page) { | 
|  | 2973 | if (mpage_da_map_blocks(&mpd) == 0) | 
|  | 2974 | mpage_da_submit_io(&mpd); | 
|  | 2975 | mpd.io_done = 1; | 
|  | 2976 | ret = MPAGE_DA_EXTENT_TAIL; | 
|  | 2977 | } | 
| Theodore Ts'o | b3a3ca8 | 2009-08-31 23:13:11 -0400 | [diff] [blame] | 2978 | trace_ext4_da_write_pages(inode, &mpd); | 
| Theodore Ts'o | f63e600 | 2009-02-23 16:42:39 -0500 | [diff] [blame] | 2979 | wbc->nr_to_write -= mpd.pages_written; | 
| Aneesh Kumar K.V | df22291 | 2008-09-08 23:05:34 -0400 | [diff] [blame] | 2980 |  | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 2981 | ext4_journal_stop(handle); | 
| Aneesh Kumar K.V | df22291 | 2008-09-08 23:05:34 -0400 | [diff] [blame] | 2982 |  | 
| Eric Sandeen | 8f64b32 | 2009-02-26 00:57:35 -0500 | [diff] [blame] | 2983 | if ((mpd.retval == -ENOSPC) && sbi->s_journal) { | 
| Aneesh Kumar K.V | 22208de | 2008-10-16 10:10:36 -0400 | [diff] [blame] | 2984 | /* commit the transaction which would | 
|  | 2985 | * free blocks released in the transaction | 
|  | 2986 | * and try again | 
|  | 2987 | */ | 
| Aneesh Kumar K.V | df22291 | 2008-09-08 23:05:34 -0400 | [diff] [blame] | 2988 | jbd2_journal_force_commit_nested(sbi->s_journal); | 
| Aneesh Kumar K.V | 22208de | 2008-10-16 10:10:36 -0400 | [diff] [blame] | 2989 | wbc->pages_skipped = pages_skipped; | 
|  | 2990 | ret = 0; | 
|  | 2991 | } else if (ret == MPAGE_DA_EXTENT_TAIL) { | 
| Aneesh Kumar K.V | a1d6cc5 | 2008-08-19 21:55:02 -0400 | [diff] [blame] | 2992 | /* | 
|  | 2993 | * Got one extent; now try with the | 
|  | 2994 | * rest of the pages. | 
|  | 2995 | */ | 
| Aneesh Kumar K.V | 22208de | 2008-10-16 10:10:36 -0400 | [diff] [blame] | 2996 | pages_written += mpd.pages_written; | 
|  | 2997 | wbc->pages_skipped = pages_skipped; | 
| Aneesh Kumar K.V | a1d6cc5 | 2008-08-19 21:55:02 -0400 | [diff] [blame] | 2998 | ret = 0; | 
| Aneesh Kumar K.V | 2acf2c2 | 2009-02-14 10:42:58 -0500 | [diff] [blame] | 2999 | io_done = 1; | 
| Aneesh Kumar K.V | 22208de | 2008-10-16 10:10:36 -0400 | [diff] [blame] | 3000 | } else if (wbc->nr_to_write) | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3001 | /* | 
|  | 3002 | * There is no more writeout needed, | 
|  | 3003 | * or we requested a nonblocking writeout | 
|  | 3004 | * and we found the device congested. | 
|  | 3005 | */ | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3006 | break; | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3007 | } | 
| Aneesh Kumar K.V | 2acf2c2 | 2009-02-14 10:42:58 -0500 | [diff] [blame] | 3008 | if (!io_done && !cycled) { | 
|  | 3009 | cycled = 1; | 
|  | 3010 | index = 0; | 
|  | 3011 | wbc->range_start = index << PAGE_CACHE_SHIFT; | 
|  | 3012 | wbc->range_end  = mapping->writeback_index - 1; | 
|  | 3013 | goto retry; | 
|  | 3014 | } | 
| Aneesh Kumar K.V | 22208de | 2008-10-16 10:10:36 -0400 | [diff] [blame] | 3015 | if (pages_skipped != wbc->pages_skipped) | 
| Theodore Ts'o | 1693918 | 2009-09-26 17:43:59 -0400 | [diff] [blame] | 3016 | ext4_msg(inode->i_sb, KERN_CRIT, | 
|  | 3017 | "This should not happen leaving %s " | 
|  | 3018 | "with nr_to_write = %ld ret = %d\n", | 
|  | 3019 | __func__, wbc->nr_to_write, ret); | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3020 |  | 
| Aneesh Kumar K.V | 22208de | 2008-10-16 10:10:36 -0400 | [diff] [blame] | 3021 | /* Update index */ | 
|  | 3022 | index += pages_written; | 
| Aneesh Kumar K.V | 2acf2c2 | 2009-02-14 10:42:58 -0500 | [diff] [blame] | 3023 | wbc->range_cyclic = range_cyclic; | 
| Aneesh Kumar K.V | 22208de | 2008-10-16 10:10:36 -0400 | [diff] [blame] | 3024 | if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) | 
|  | 3025 | /* | 
|  | 3026 | * set the writeback_index so that range_cyclic | 
|  | 3027 | * mode will write it back later | 
|  | 3028 | */ | 
|  | 3029 | mapping->writeback_index = index; | 
| Aneesh Kumar K.V | a1d6cc5 | 2008-08-19 21:55:02 -0400 | [diff] [blame] | 3030 |  | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3031 | out_writepages: | 
| Aneesh Kumar K.V | 22208de | 2008-10-16 10:10:36 -0400 | [diff] [blame] | 3032 | if (!no_nrwrite_index_update) | 
|  | 3033 | wbc->no_nrwrite_index_update = 0; | 
| Richard Kennedy | 2faf2e1 | 2009-12-25 15:46:07 -0500 | [diff] [blame] | 3034 | wbc->nr_to_write -= nr_to_writebump; | 
| Theodore Ts'o | de89de6 | 2009-08-31 17:00:59 -0400 | [diff] [blame] | 3035 | wbc->range_start = range_start; | 
| Theodore Ts'o | 9bffad1 | 2009-06-17 11:48:11 -0400 | [diff] [blame] | 3036 | trace_ext4_da_writepages_result(inode, wbc, ret, pages_written); | 
| Mingming Cao | 61628a3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3037 | return ret; | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3038 | } | 
|  | 3039 |  | 
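|  |  | /* | 
|  |  | * FALL_BACK_TO_NONDELALLOC is passed via *fsdata from ext4_da_write_begin() | 
|  |  | * to ext4_da_write_end() when ext4_nonda_switch() decides free space is too | 
|  |  | * low to keep using delayed allocation for this write. | 
|  |  | */ | 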
| Aneesh Kumar K.V | 79f0be8 | 2008-10-08 23:13:30 -0400 | [diff] [blame] | 3040 | #define FALL_BACK_TO_NONDELALLOC 1 | 
|  | 3041 | static int ext4_nonda_switch(struct super_block *sb) | 
|  | 3042 | { | 
|  | 3043 | s64 free_blocks, dirty_blocks; | 
|  | 3044 | struct ext4_sb_info *sbi = EXT4_SB(sb); | 
|  | 3045 |  | 
|  | 3046 | /* | 
|  | 3047 | * Switch to non-delalloc mode if we are running low | 
|  | 3048 | * on free blocks. The free block accounting via percpu | 
| Eric Dumazet | 179f7eb | 2009-01-06 14:41:04 -0800 | [diff] [blame] | 3049 | * counters can get slightly wrong with percpu_counter_batch getting | 
| Aneesh Kumar K.V | 79f0be8 | 2008-10-08 23:13:30 -0400 | [diff] [blame] | 3050 | * accumulated on each CPU without updating global counters. | 
|  | 3051 | * Delalloc needs accurate free block accounting, so switch | 
|  | 3052 | * to non-delalloc when we are near the error range. | 
|  | 3053 | */ | 
|  | 3054 | free_blocks  = percpu_counter_read_positive(&sbi->s_freeblocks_counter); | 
|  | 3055 | dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyblocks_counter); | 
|  | 3056 | if (2 * free_blocks < 3 * dirty_blocks || | 
|  | 3057 | free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) { | 
|  | 3058 | /* | 
| Eric Sandeen | c8afb44 | 2009-12-23 07:58:12 -0500 | [diff] [blame] | 3059 | * free block count is less than 150% of dirty blocks, | 
|  | 3060 | * or free blocks are below the watermark | 
| Aneesh Kumar K.V | 79f0be8 | 2008-10-08 23:13:30 -0400 | [diff] [blame] | 3061 | */ | 
|  | 3062 | return 1; | 
|  | 3063 | } | 
| Eric Sandeen | c8afb44 | 2009-12-23 07:58:12 -0500 | [diff] [blame] | 3064 | /* | 
|  | 3065 | * Even if we don't switch but are nearing capacity, | 
|  | 3066 | * start pushing delalloc when 1/2 of free blocks are dirty. | 
|  | 3067 | */ | 
|  | 3068 | if (free_blocks < 2 * dirty_blocks) | 
|  | 3069 | writeback_inodes_sb_if_idle(sb); | 
|  | 3070 |  | 
| Aneesh Kumar K.V | 79f0be8 | 2008-10-08 23:13:30 -0400 | [diff] [blame] | 3071 | return 0; | 
|  | 3072 | } | 
|  | 3073 |  | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3074 | static int ext4_da_write_begin(struct file *file, struct address_space *mapping, | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 3075 | loff_t pos, unsigned len, unsigned flags, | 
|  | 3076 | struct page **pagep, void **fsdata) | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3077 | { | 
| Aneesh Kumar K.V | 1db9138 | 2010-01-22 17:06:20 -0500 | [diff] [blame] | 3078 | int ret, retries = 0, quota_retries = 0; | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3079 | struct page *page; | 
|  | 3080 | pgoff_t index; | 
|  | 3081 | unsigned from, to; | 
|  | 3082 | struct inode *inode = mapping->host; | 
|  | 3083 | handle_t *handle; | 
|  | 3084 |  | 
|  | 3085 | index = pos >> PAGE_CACHE_SHIFT; | 
|  | 3086 | from = pos & (PAGE_CACHE_SIZE - 1); | 
|  | 3087 | to = from + len; | 
| Aneesh Kumar K.V | 79f0be8 | 2008-10-08 23:13:30 -0400 | [diff] [blame] | 3088 |  | 
|  | 3089 | if (ext4_nonda_switch(inode->i_sb)) { | 
|  | 3090 | *fsdata = (void *)FALL_BACK_TO_NONDELALLOC; | 
|  | 3091 | return ext4_write_begin(file, mapping, pos, | 
|  | 3092 | len, flags, pagep, fsdata); | 
|  | 3093 | } | 
|  | 3094 | *fsdata = (void *)0; | 
| Theodore Ts'o | 9bffad1 | 2009-06-17 11:48:11 -0400 | [diff] [blame] | 3095 | trace_ext4_da_write_begin(inode, pos, len, flags); | 
| Mingming Cao | d2a1763 | 2008-07-14 17:52:37 -0400 | [diff] [blame] | 3096 | retry: | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3097 | /* | 
|  | 3098 | * With delayed allocation, we don't log the i_disksize update | 
|  | 3099 | * if there is delayed block allocation. But we still need | 
|  | 3100 | * to journal the i_disksize update if we write to the end | 
|  | 3101 | * of the file through an already mapped buffer. | 
|  | 3102 | */ | 
|  | 3103 | handle = ext4_journal_start(inode, 1); | 
|  | 3104 | if (IS_ERR(handle)) { | 
|  | 3105 | ret = PTR_ERR(handle); | 
|  | 3106 | goto out; | 
|  | 3107 | } | 
| Jan Kara | ebd3610 | 2009-02-22 21:09:59 -0500 | [diff] [blame] | 3108 | /* We cannot recurse into the filesystem as the transaction is already | 
|  | 3109 | * started */ | 
|  | 3110 | flags |= AOP_FLAG_NOFS; | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3111 |  | 
| Nick Piggin | 54566b2 | 2009-01-04 12:00:53 -0800 | [diff] [blame] | 3112 | page = grab_cache_page_write_begin(mapping, index, flags); | 
| Eric Sandeen | d5a0d4f | 2008-08-02 18:51:06 -0400 | [diff] [blame] | 3113 | if (!page) { | 
|  | 3114 | ext4_journal_stop(handle); | 
|  | 3115 | ret = -ENOMEM; | 
|  | 3116 | goto out; | 
|  | 3117 | } | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3118 | *pagep = page; | 
|  | 3119 |  | 
|  | 3120 | ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata, | 
| Theodore Ts'o | b920c75 | 2009-05-14 00:54:29 -0400 | [diff] [blame] | 3121 | ext4_da_get_block_prep); | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3122 | if (ret < 0) { | 
|  | 3123 | unlock_page(page); | 
|  | 3124 | ext4_journal_stop(handle); | 
|  | 3125 | page_cache_release(page); | 
| Aneesh Kumar K.V | ae4d537 | 2008-09-13 13:10:25 -0400 | [diff] [blame] | 3126 | /* | 
|  | 3127 | * block_write_begin may have instantiated a few blocks | 
|  | 3128 | * outside i_size.  Trim these off again. Don't need | 
|  | 3129 | * i_size_read because we hold i_mutex. | 
|  | 3130 | */ | 
|  | 3131 | if (pos + len > inode->i_size) | 
| Jan Kara | b9a4207 | 2009-12-08 21:24:33 -0500 | [diff] [blame] | 3132 | ext4_truncate_failed_write(inode); | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3133 | } | 
|  | 3134 |  | 
| Mingming Cao | d2a1763 | 2008-07-14 17:52:37 -0400 | [diff] [blame] | 3135 | if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) | 
|  | 3136 | goto retry; | 
| Aneesh Kumar K.V | 1db9138 | 2010-01-22 17:06:20 -0500 | [diff] [blame] | 3137 |  | 
|  | 3138 | if ((ret == -EDQUOT) && | 
|  | 3139 | EXT4_I(inode)->i_reserved_meta_blocks && | 
|  | 3140 | (quota_retries++ < 3)) { | 
|  | 3141 | /* | 
|  | 3142 | * Since we often over-estimate the number of meta | 
|  | 3143 | * data blocks required, we may sometimes get a | 
|  | 3144 | * spurious out-of-quota error even though there would | 
|  | 3145 | * be enough space once we write the data blocks and | 
|  | 3146 | * find out how many meta data blocks were _really_ | 
|  | 3147 | * required.  So try forcing the inode write to see if | 
|  | 3148 | * that helps. | 
|  | 3149 | */ | 
|  | 3150 | write_inode_now(inode, (quota_retries == 3)); | 
|  | 3151 | goto retry; | 
|  | 3152 | } | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3153 | out: | 
|  | 3154 | return ret; | 
|  | 3155 | } | 
|  | 3156 |  | 
| Mingming Cao | 632eaea | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3157 | /* | 
|  | 3158 | * Check if we should update i_disksize | 
|  | 3159 | * when writing to the end of the file without requiring block allocation | 
|  | 3160 | */ | 
|  | 3161 | static int ext4_da_should_update_i_disksize(struct page *page, | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 3162 | unsigned long offset) | 
| Mingming Cao | 632eaea | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3163 | { | 
|  | 3164 | struct buffer_head *bh; | 
|  | 3165 | struct inode *inode = page->mapping->host; | 
|  | 3166 | unsigned int idx; | 
|  | 3167 | int i; | 
|  | 3168 |  | 
|  | 3169 | bh = page_buffers(page); | 
|  | 3170 | idx = offset >> inode->i_blkbits; | 
|  | 3171 |  | 
| Theodore Ts'o | af5bc92 | 2008-09-08 22:25:24 -0400 | [diff] [blame] | 3172 | for (i = 0; i < idx; i++) | 
| Mingming Cao | 632eaea | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3173 | bh = bh->b_this_page; | 
|  | 3174 |  | 
| Aneesh Kumar K.V | 29fa89d | 2009-05-12 16:30:27 -0400 | [diff] [blame] | 3175 | if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh)) | 
| Mingming Cao | 632eaea | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3176 | return 0; | 
|  | 3177 | return 1; | 
|  | 3178 | } | 
|  | 3179 |  | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3180 | static int ext4_da_write_end(struct file *file, | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 3181 | struct address_space *mapping, | 
|  | 3182 | loff_t pos, unsigned len, unsigned copied, | 
|  | 3183 | struct page *page, void *fsdata) | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3184 | { | 
|  | 3185 | struct inode *inode = mapping->host; | 
|  | 3186 | int ret = 0, ret2; | 
|  | 3187 | handle_t *handle = ext4_journal_current_handle(); | 
|  | 3188 | loff_t new_i_size; | 
| Mingming Cao | 632eaea | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3189 | unsigned long start, end; | 
| Aneesh Kumar K.V | 79f0be8 | 2008-10-08 23:13:30 -0400 | [diff] [blame] | 3190 | int write_mode = (int)(unsigned long)fsdata; | 
|  | 3191 |  | 
|  | 3192 | if (write_mode == FALL_BACK_TO_NONDELALLOC) { | 
|  | 3193 | if (ext4_should_order_data(inode)) { | 
|  | 3194 | return ext4_ordered_write_end(file, mapping, pos, | 
|  | 3195 | len, copied, page, fsdata); | 
|  | 3196 | } else if (ext4_should_writeback_data(inode)) { | 
|  | 3197 | return ext4_writeback_write_end(file, mapping, pos, | 
|  | 3198 | len, copied, page, fsdata); | 
|  | 3199 | } else { | 
|  | 3200 | BUG(); | 
|  | 3201 | } | 
|  | 3202 | } | 
| Mingming Cao | 632eaea | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3203 |  | 
| Theodore Ts'o | 9bffad1 | 2009-06-17 11:48:11 -0400 | [diff] [blame] | 3204 | trace_ext4_da_write_end(inode, pos, len, copied); | 
| Mingming Cao | 632eaea | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3205 | start = pos & (PAGE_CACHE_SIZE - 1); | 
| Theodore Ts'o | af5bc92 | 2008-09-08 22:25:24 -0400 | [diff] [blame] | 3206 | end = start + copied - 1; | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3207 |  | 
|  | 3208 | /* | 
|  | 3209 | * generic_write_end() will run mark_inode_dirty() if i_size | 
|  | 3210 | * changes.  So let's piggyback the i_disksize mark_inode_dirty | 
|  | 3211 | * into that. | 
|  | 3212 | */ | 
|  | 3213 |  | 
|  | 3214 | new_i_size = pos + copied; | 
| Mingming Cao | 632eaea | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3215 | if (new_i_size > EXT4_I(inode)->i_disksize) { | 
|  | 3216 | if (ext4_da_should_update_i_disksize(page, end)) { | 
|  | 3217 | down_write(&EXT4_I(inode)->i_data_sem); | 
|  | 3218 | if (new_i_size > EXT4_I(inode)->i_disksize) { | 
|  | 3219 | /* | 
|  | 3220 | * Updating i_disksize when extending file | 
|  | 3221 | * without needing block allocation | 
|  | 3222 | */ | 
|  | 3223 | if (ext4_should_order_data(inode)) | 
|  | 3224 | ret = ext4_jbd2_file_inode(handle, | 
|  | 3225 | inode); | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3226 |  | 
| Mingming Cao | 632eaea | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3227 | EXT4_I(inode)->i_disksize = new_i_size; | 
|  | 3228 | } | 
|  | 3229 | up_write(&EXT4_I(inode)->i_data_sem); | 
| Aneesh Kumar K.V | cf17fea | 2008-09-13 13:06:18 -0400 | [diff] [blame] | 3230 | /* We need to mark the inode dirty even if | 
|  | 3231 | * new_i_size is less than inode->i_size | 
|  | 3232 | * but greater than i_disksize (hint: delalloc) | 
|  | 3233 | */ | 
|  | 3234 | ext4_mark_inode_dirty(handle, inode); | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3235 | } | 
| Mingming Cao | 632eaea | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3236 | } | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3237 | ret2 = generic_write_end(file, mapping, pos, len, copied, | 
|  | 3238 | page, fsdata); | 
|  | 3239 | copied = ret2; | 
|  | 3240 | if (ret2 < 0) | 
|  | 3241 | ret = ret2; | 
|  | 3242 | ret2 = ext4_journal_stop(handle); | 
|  | 3243 | if (!ret) | 
|  | 3244 | ret = ret2; | 
|  | 3245 |  | 
|  | 3246 | return ret ? ret : copied; | 
|  | 3247 | } | 
|  | 3248 |  | 
|  | 3249 | static void ext4_da_invalidatepage(struct page *page, unsigned long offset) | 
|  | 3250 | { | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3251 | /* | 
|  | 3252 | * Drop reserved blocks | 
|  | 3253 | */ | 
|  | 3254 | BUG_ON(!PageLocked(page)); | 
|  | 3255 | if (!page_has_buffers(page)) | 
|  | 3256 | goto out; | 
|  | 3257 |  | 
| Mingming Cao | d2a1763 | 2008-07-14 17:52:37 -0400 | [diff] [blame] | 3258 | ext4_da_page_release_reservation(page, offset); | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3259 |  | 
|  | 3260 | out: | 
|  | 3261 | ext4_invalidatepage(page, offset); | 
|  | 3262 |  | 
|  | 3263 | return; | 
|  | 3264 | } | 
|  | 3265 |  | 
| Theodore Ts'o | ccd2506 | 2009-02-26 01:04:07 -0500 | [diff] [blame] | 3266 | /* | 
|  | 3267 | * Force all delayed allocation blocks to be allocated for a given inode. | 
|  | 3268 | */ | 
|  | 3269 | int ext4_alloc_da_blocks(struct inode *inode) | 
|  | 3270 | { | 
| Theodore Ts'o | fb40ba0 | 2009-09-16 19:30:40 -0400 | [diff] [blame] | 3271 | trace_ext4_alloc_da_blocks(inode); | 
|  | 3272 |  | 
| Theodore Ts'o | ccd2506 | 2009-02-26 01:04:07 -0500 | [diff] [blame] | 3273 | if (!EXT4_I(inode)->i_reserved_data_blocks && | 
|  | 3274 | !EXT4_I(inode)->i_reserved_meta_blocks) | 
|  | 3275 | return 0; | 
|  | 3276 |  | 
|  | 3277 | /* | 
|  | 3278 | * We do something simple for now.  The filemap_flush() will | 
|  | 3279 | * also start triggering a write of the data blocks, which is | 
|  | 3280 | * not strictly speaking necessary (and for users of | 
|  | 3281 | * laptop_mode, not even desirable).  However, to do otherwise | 
|  | 3282 | * would require replicating code paths in: | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 3283 | * | 
| Theodore Ts'o | ccd2506 | 2009-02-26 01:04:07 -0500 | [diff] [blame] | 3284 | * ext4_da_writepages() -> | 
|  | 3285 | *    write_cache_pages() ---> (via passed in callback function) | 
|  | 3286 | *        __mpage_da_writepage() --> | 
|  | 3287 | *           mpage_add_bh_to_extent() | 
|  | 3288 | *           mpage_da_map_blocks() | 
|  | 3289 | * | 
|  | 3290 | * The problem is that write_cache_pages(), located in | 
|  | 3291 | * mm/page-writeback.c, marks pages clean in preparation for | 
|  | 3292 | * doing I/O, which is not desirable if we're not planning on | 
|  | 3293 | * doing I/O at all. | 
|  | 3294 | * | 
|  | 3295 | * We could call write_cache_pages(), and then redirty all of | 
|  | 3296 | * the pages by calling redirty_page_for_writeback() but that | 
|  | 3297 | * would be ugly in the extreme.  So instead we would need to | 
|  | 3298 | * replicate parts of the code in the above functions, | 
|  | 3299 | * simplifying them because we wouldn't actually intend to | 
|  | 3300 | * write out the pages, but rather only collect contiguous | 
|  | 3301 | * logical block extents, call the multi-block allocator, and | 
|  | 3302 | * then update the buffer heads with the block allocations. | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 3303 | * | 
| Theodore Ts'o | ccd2506 | 2009-02-26 01:04:07 -0500 | [diff] [blame] | 3304 | * For now, though, we'll cheat by calling filemap_flush(), | 
|  | 3305 | * which will map the blocks, and start the I/O, but not | 
|  | 3306 | * actually wait for the I/O to complete. | 
|  | 3307 | */ | 
|  | 3308 | return filemap_flush(inode->i_mapping); | 
|  | 3309 | } | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3310 |  | 
|  | 3311 | /* | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3312 | * bmap() is special.  It gets used by applications such as lilo and by | 
|  | 3313 | * the swapper to find the on-disk block of a specific piece of data. | 
|  | 3314 | * | 
|  | 3315 | * Naturally, this is dangerous if the block concerned is still in the | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 3316 | * journal.  If somebody makes a swapfile on an ext4 data-journaling | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3317 | * filesystem and enables swap, then they may get a nasty shock when the | 
|  | 3318 | * data getting swapped to that swapfile suddenly gets overwritten by | 
|  | 3319 | * the original zero's written out previously to the journal and | 
|  | 3320 | * awaiting writeback in the kernel's buffer cache. | 
|  | 3321 | * | 
|  | 3322 | * So, if we see any bmap calls here on a modified, data-journaled file, | 
|  | 3323 | * take extra steps to flush any blocks which might be in the cache. | 
|  | 3324 | */ | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 3325 | static sector_t ext4_bmap(struct address_space *mapping, sector_t block) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3326 | { | 
|  | 3327 | struct inode *inode = mapping->host; | 
|  | 3328 | journal_t *journal; | 
|  | 3329 | int err; | 
|  | 3330 |  | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 3331 | if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && | 
|  | 3332 | test_opt(inode->i_sb, DELALLOC)) { | 
|  | 3333 | /* | 
|  | 3334 | * With delalloc we want to sync the file | 
|  | 3335 | * so that we can make sure we allocate | 
|  | 3336 | * blocks for the file | 
|  | 3337 | */ | 
|  | 3338 | filemap_write_and_wait(mapping); | 
|  | 3339 | } | 
|  | 3340 |  | 
| Theodore Ts'o | 19f5fb7 | 2010-01-24 14:34:07 -0500 | [diff] [blame] | 3341 | if (EXT4_JOURNAL(inode) && | 
|  | 3342 | ext4_test_inode_state(inode, EXT4_STATE_JDATA)) { | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3343 | /* | 
|  | 3344 | * This is a REALLY heavyweight approach, but the use of | 
|  | 3345 | * bmap on dirty files is expected to be extremely rare: | 
|  | 3346 | * only if we run lilo or swapon on a freshly made file | 
|  | 3347 | * do we expect this to happen. | 
|  | 3348 | * | 
|  | 3349 | * (bmap requires CAP_SYS_RAWIO so this does not | 
|  | 3350 | * represent an unprivileged user DOS attack --- we'd be | 
|  | 3351 | * in trouble if mortal users could trigger this path at | 
|  | 3352 | * will.) | 
|  | 3353 | * | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 3354 | * NB. EXT4_STATE_JDATA is not set on files other than | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3355 | * regular files.  If somebody wants to bmap a directory | 
|  | 3356 | * or symlink and gets confused because the buffer | 
|  | 3357 | * hasn't yet been flushed to disk, they deserve | 
|  | 3358 | * everything they get. | 
|  | 3359 | */ | 
|  | 3360 |  | 
| Theodore Ts'o | 19f5fb7 | 2010-01-24 14:34:07 -0500 | [diff] [blame] | 3361 | ext4_clear_inode_state(inode, EXT4_STATE_JDATA); | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 3362 | journal = EXT4_JOURNAL(inode); | 
| Mingming Cao | dab291a | 2006-10-11 01:21:01 -0700 | [diff] [blame] | 3363 | jbd2_journal_lock_updates(journal); | 
|  | 3364 | err = jbd2_journal_flush(journal); | 
|  | 3365 | jbd2_journal_unlock_updates(journal); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3366 |  | 
|  | 3367 | if (err) | 
|  | 3368 | return 0; | 
|  | 3369 | } | 
|  | 3370 |  | 
| Theodore Ts'o | af5bc92 | 2008-09-08 22:25:24 -0400 | [diff] [blame] | 3371 | return generic_block_bmap(mapping, block, ext4_get_block); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3372 | } | 
|  | 3373 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 3374 | static int ext4_readpage(struct file *file, struct page *page) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3375 | { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 3376 | return mpage_readpage(page, ext4_get_block); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3377 | } | 
|  | 3378 |  | 
|  | 3379 | static int | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 3380 | ext4_readpages(struct file *file, struct address_space *mapping, | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3381 | struct list_head *pages, unsigned nr_pages) | 
|  | 3382 | { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 3383 | return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3384 | } | 
|  | 3385 |  | 
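|  |  | /* | 
|  |  | * Release an io_end structure: drop the page and inode references taken | 
|  |  | * when it was set up, then free it. | 
|  |  | */ | 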
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3386 | static void ext4_free_io_end(ext4_io_end_t *io) | 
|  | 3387 | { | 
|  | 3388 | BUG_ON(!io); | 
|  | 3389 | if (io->page) | 
|  | 3390 | put_page(io->page); | 
|  | 3391 | iput(io->inode); | 
|  | 3392 | kfree(io); | 
|  | 3393 | } | 
|  | 3394 |  | 
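|  |  | /* | 
|  |  | * Walk the buffers of a page being invalidated and free any io_end still | 
|  |  | * attached (via bh->b_private) to uninitialized-extent buffers at or | 
|  |  | * beyond the invalidation offset. | 
|  |  | */ | 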
|  | 3395 | static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset) | 
|  | 3396 | { | 
|  | 3397 | struct buffer_head *head, *bh; | 
|  | 3398 | unsigned int curr_off = 0; | 
|  | 3399 |  | 
|  | 3400 | if (!page_has_buffers(page)) | 
|  | 3401 | return; | 
|  | 3402 | head = bh = page_buffers(page); | 
|  | 3403 | do { | 
|  | 3404 | if (offset <= curr_off && test_clear_buffer_uninit(bh) | 
|  | 3405 | && bh->b_private) { | 
|  | 3406 | ext4_free_io_end(bh->b_private); | 
|  | 3407 | bh->b_private = NULL; | 
|  | 3408 | bh->b_end_io = NULL; | 
|  | 3409 | } | 
|  | 3410 | curr_off = curr_off + bh->b_size; | 
|  | 3411 | bh = bh->b_this_page; | 
|  | 3412 | } while (bh != head); | 
|  | 3413 | } | 
|  | 3414 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 3415 | static void ext4_invalidatepage(struct page *page, unsigned long offset) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3416 | { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 3417 | journal_t *journal = EXT4_JOURNAL(page->mapping->host); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3418 |  | 
|  | 3419 | /* | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3420 | * free any io_end structure allocated for buffers to be discarded | 
|  | 3421 | */ | 
|  | 3422 | if (ext4_should_dioread_nolock(page->mapping->host)) | 
|  | 3423 | ext4_invalidatepage_free_endio(page, offset); | 
|  | 3424 | /* | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3425 | * If it's a full truncate we just forget about the pending dirtying | 
|  | 3426 | */ | 
|  | 3427 | if (offset == 0) | 
|  | 3428 | ClearPageChecked(page); | 
|  | 3429 |  | 
| Frank Mayhar | 0390131 | 2009-01-07 00:06:22 -0500 | [diff] [blame] | 3430 | if (journal) | 
|  | 3431 | jbd2_journal_invalidatepage(journal, page, offset); | 
|  | 3432 | else | 
|  | 3433 | block_invalidatepage(page, offset); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3434 | } | 
|  | 3435 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 3436 | static int ext4_releasepage(struct page *page, gfp_t wait) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3437 | { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 3438 | journal_t *journal = EXT4_JOURNAL(page->mapping->host); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3439 |  | 
|  | 3440 | WARN_ON(PageChecked(page)); | 
|  | 3441 | if (!page_has_buffers(page)) | 
|  | 3442 | return 0; | 
| Frank Mayhar | 0390131 | 2009-01-07 00:06:22 -0500 | [diff] [blame] | 3443 | if (journal) | 
|  | 3444 | return jbd2_journal_try_to_free_buffers(journal, page, wait); | 
|  | 3445 | else | 
|  | 3446 | return try_to_free_buffers(page); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3447 | } | 
|  | 3448 |  | 
|  | 3449 | /* | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3450 | * O_DIRECT for ext3 (or indirect map) based files | 
|  | 3451 | * | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3452 | * If the O_DIRECT write will extend the file then add this inode to the | 
|  | 3453 | * orphan list.  So recovery will truncate it back to the original size | 
|  | 3454 | * if the machine crashes during the write. | 
|  | 3455 | * | 
|  | 3456 | * If the O_DIRECT write is instantiating holes inside i_size and the machine | 
| Jan Kara | 7fb5409 | 2008-02-10 01:08:38 -0500 | [diff] [blame] | 3457 | * crashes then stale disk data _may_ be exposed inside the file. But current | 
|  | 3458 | * VFS code falls back into buffered path in that case so we are safe. | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3459 | */ | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3460 | static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 3461 | const struct iovec *iov, loff_t offset, | 
|  | 3462 | unsigned long nr_segs) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3463 | { | 
|  | 3464 | struct file *file = iocb->ki_filp; | 
|  | 3465 | struct inode *inode = file->f_mapping->host; | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 3466 | struct ext4_inode_info *ei = EXT4_I(inode); | 
| Jan Kara | 7fb5409 | 2008-02-10 01:08:38 -0500 | [diff] [blame] | 3467 | handle_t *handle; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3468 | ssize_t ret; | 
|  | 3469 | int orphan = 0; | 
|  | 3470 | size_t count = iov_length(iov, nr_segs); | 
| Eric Sandeen | fbbf694 | 2009-10-02 21:20:55 -0400 | [diff] [blame] | 3471 | int retries = 0; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3472 |  | 
|  | 3473 | if (rw == WRITE) { | 
|  | 3474 | loff_t final_size = offset + count; | 
|  | 3475 |  | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3476 | if (final_size > inode->i_size) { | 
| Jan Kara | 7fb5409 | 2008-02-10 01:08:38 -0500 | [diff] [blame] | 3477 | /* Credits for sb + inode write */ | 
|  | 3478 | handle = ext4_journal_start(inode, 2); | 
|  | 3479 | if (IS_ERR(handle)) { | 
|  | 3480 | ret = PTR_ERR(handle); | 
|  | 3481 | goto out; | 
|  | 3482 | } | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 3483 | ret = ext4_orphan_add(handle, inode); | 
| Jan Kara | 7fb5409 | 2008-02-10 01:08:38 -0500 | [diff] [blame] | 3484 | if (ret) { | 
|  | 3485 | ext4_journal_stop(handle); | 
|  | 3486 | goto out; | 
|  | 3487 | } | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3488 | orphan = 1; | 
|  | 3489 | ei->i_disksize = inode->i_size; | 
| Jan Kara | 7fb5409 | 2008-02-10 01:08:38 -0500 | [diff] [blame] | 3490 | ext4_journal_stop(handle); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3491 | } | 
|  | 3492 | } | 
|  | 3493 |  | 
| Eric Sandeen | fbbf694 | 2009-10-02 21:20:55 -0400 | [diff] [blame] | 3494 | retry: | 
| Jiaying Zhang | b7adc1f | 2010-03-02 13:26:36 -0500 | [diff] [blame] | 3495 | if (rw == READ && ext4_should_dioread_nolock(inode)) | 
|  | 3496 | ret = blockdev_direct_IO_no_locking(rw, iocb, inode, | 
|  | 3497 | inode->i_sb->s_bdev, iov, | 
|  | 3498 | offset, nr_segs, | 
|  | 3499 | ext4_get_block, NULL); | 
|  | 3500 | else | 
|  | 3501 | ret = blockdev_direct_IO(rw, iocb, inode, | 
|  | 3502 | inode->i_sb->s_bdev, iov, | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3503 | offset, nr_segs, | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 3504 | ext4_get_block, NULL); | 
| Eric Sandeen | fbbf694 | 2009-10-02 21:20:55 -0400 | [diff] [blame] | 3505 | if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) | 
|  | 3506 | goto retry; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3507 |  | 
| Jan Kara | 7fb5409 | 2008-02-10 01:08:38 -0500 | [diff] [blame] | 3508 | if (orphan) { | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3509 | int err; | 
|  | 3510 |  | 
| Jan Kara | 7fb5409 | 2008-02-10 01:08:38 -0500 | [diff] [blame] | 3511 | /* Credits for sb + inode write */ | 
|  | 3512 | handle = ext4_journal_start(inode, 2); | 
|  | 3513 | if (IS_ERR(handle)) { | 
|  | 3514 | /* This is really bad luck. We've written the data | 
|  | 3515 | * but cannot extend i_size. Bail out and pretend | 
|  | 3516 | * the write failed... */ | 
|  | 3517 | ret = PTR_ERR(handle); | 
| Dmitry Monakhov | da1dafc | 2010-03-01 23:15:02 -0500 | [diff] [blame] | 3518 | if (inode->i_nlink) | 
|  | 3519 | ext4_orphan_del(NULL, inode); | 
|  | 3520 |  | 
| Jan Kara | 7fb5409 | 2008-02-10 01:08:38 -0500 | [diff] [blame] | 3521 | goto out; | 
|  | 3522 | } | 
|  | 3523 | if (inode->i_nlink) | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 3524 | ext4_orphan_del(handle, inode); | 
| Jan Kara | 7fb5409 | 2008-02-10 01:08:38 -0500 | [diff] [blame] | 3525 | if (ret > 0) { | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3526 | loff_t end = offset + ret; | 
|  | 3527 | if (end > inode->i_size) { | 
|  | 3528 | ei->i_disksize = end; | 
|  | 3529 | i_size_write(inode, end); | 
|  | 3530 | /* | 
|  | 3531 | * We're going to return a positive `ret' | 
|  | 3532 | * here due to non-zero-length I/O, so there's | 
|  | 3533 | * no way of reporting error returns from | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 3534 | * ext4_mark_inode_dirty() to userspace.  So | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3535 | * ignore it. | 
|  | 3536 | */ | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 3537 | ext4_mark_inode_dirty(handle, inode); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3538 | } | 
|  | 3539 | } | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 3540 | err = ext4_journal_stop(handle); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3541 | if (ret == 0) | 
|  | 3542 | ret = err; | 
|  | 3543 | } | 
|  | 3544 | out: | 
|  | 3545 | return ret; | 
|  | 3546 | } | 
|  | 3547 |  | 
| Jiaying Zhang | c7064ef | 2010-03-02 13:28:44 -0500 | [diff] [blame] | 3548 | static int ext4_get_block_write(struct inode *inode, sector_t iblock, | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3549 | struct buffer_head *bh_result, int create) | 
|  | 3550 | { | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3551 | handle_t *handle = ext4_journal_current_handle(); | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3552 | int ret = 0; | 
|  | 3553 | unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; | 
|  | 3554 | int dio_credits; | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3555 | int started = 0; | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3556 |  | 
| Jiaying Zhang | c7064ef | 2010-03-02 13:28:44 -0500 | [diff] [blame] | 3557 | ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n", | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3558 | inode->i_ino, create); | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3559 | /* | 
| Jiaying Zhang | c7064ef | 2010-03-02 13:28:44 -0500 | [diff] [blame] | 3560 | * ext4_get_block used in preparation for a DIO write or buffered write. | 
|  | 3561 | * We allocate an uninitialized extent if blocks haven't been allocated. | 
|  | 3562 | * The extent will be converted to initialized after the IO completes. | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3563 | */ | 
| Jiaying Zhang | c7064ef | 2010-03-02 13:28:44 -0500 | [diff] [blame] | 3564 | create = EXT4_GET_BLOCKS_IO_CREATE_EXT; | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3565 |  | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3566 | if (!handle) { | 
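|  |  | /* | 
|  |  | * No transaction is running (we were called from the direct I/O | 
|  |  | * path rather than under an existing handle), so start one with | 
|  |  | * enough credits for a single chunk of blocks. | 
|  |  | */ | 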
|  | 3567 | if (max_blocks > DIO_MAX_BLOCKS) | 
|  | 3568 | max_blocks = DIO_MAX_BLOCKS; | 
|  | 3569 | dio_credits = ext4_chunk_trans_blocks(inode, max_blocks); | 
|  | 3570 | handle = ext4_journal_start(inode, dio_credits); | 
|  | 3571 | if (IS_ERR(handle)) { | 
|  | 3572 | ret = PTR_ERR(handle); | 
|  | 3573 | goto out; | 
|  | 3574 | } | 
|  | 3575 | started = 1; | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3576 | } | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3577 |  | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3578 | ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result, | 
|  | 3579 | create); | 
|  | 3580 | if (ret > 0) { | 
|  | 3581 | bh_result->b_size = (ret << inode->i_blkbits); | 
|  | 3582 | ret = 0; | 
|  | 3583 | } | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3584 | if (started) | 
|  | 3585 | ext4_journal_stop(handle); | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3586 | out: | 
|  | 3587 | return ret; | 
|  | 3588 | } | 
|  | 3589 |  | 
| Jiaying Zhang | c7064ef | 2010-03-02 13:28:44 -0500 | [diff] [blame] | 3590 | static void dump_completed_IO(struct inode * inode) | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3591 | { | 
|  | 3592 | #ifdef	EXT4_DEBUG | 
|  | 3593 | struct list_head *cur, *before, *after; | 
|  | 3594 | ext4_io_end_t *io, *io0, *io1; | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3595 | unsigned long flags; | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3596 |  | 
| Jiaying Zhang | c7064ef | 2010-03-02 13:28:44 -0500 | [diff] [blame] | 3597 | if (list_empty(&EXT4_I(inode)->i_completed_io_list)){ | 
|  | 3598 | ext4_debug("inode %lu completed_io list is empty\n", inode->i_ino); | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3599 | return; | 
|  | 3600 | } | 
|  | 3601 |  | 
| Jiaying Zhang | c7064ef | 2010-03-02 13:28:44 -0500 | [diff] [blame] | 3602 | ext4_debug("Dump inode %lu completed_io list \n", inode->i_ino); | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3603 | spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags); | 
| Jiaying Zhang | c7064ef | 2010-03-02 13:28:44 -0500 | [diff] [blame] | 3604 | list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list){ | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3605 | cur = &io->list; | 
|  | 3606 | before = cur->prev; | 
|  | 3607 | io0 = container_of(before, ext4_io_end_t, list); | 
|  | 3608 | after = cur->next; | 
|  | 3609 | io1 = container_of(after, ext4_io_end_t, list); | 
|  | 3610 |  | 
|  | 3611 | ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n", | 
|  | 3612 | io, inode->i_ino, io0, io1); | 
|  | 3613 | } | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3614 | spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags); | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3615 | #endif | 
|  | 3616 | } | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3617 |  | 
|  | 3618 | /* | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3619 | * check a range of space and convert unwritten extents to written. | 
|  | 3620 | */ | 
| Jiaying Zhang | c7064ef | 2010-03-02 13:28:44 -0500 | [diff] [blame] | 3621 | static int ext4_end_io_nolock(ext4_io_end_t *io) | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3622 | { | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3623 | struct inode *inode = io->inode; | 
|  | 3624 | loff_t offset = io->offset; | 
| Eric Sandeen | a1de02d | 2010-02-04 23:58:38 -0500 | [diff] [blame] | 3625 | ssize_t size = io->size; | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3626 | int ret = 0; | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3627 |  | 
| Jiaying Zhang | c7064ef | 2010-03-02 13:28:44 -0500 | [diff] [blame] | 3628 | ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p," | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3629 | "list->prev 0x%p\n", | 
|  | 3630 | io, inode->i_ino, io->list.next, io->list.prev); | 
|  | 3631 |  | 
|  | 3632 | if (list_empty(&io->list)) | 
|  | 3633 | return ret; | 
|  | 3634 |  | 
| Jiaying Zhang | c7064ef | 2010-03-02 13:28:44 -0500 | [diff] [blame] | 3635 | if (io->flag != EXT4_IO_UNWRITTEN) | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3636 | return ret; | 
|  | 3637 |  | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3638 | ret = ext4_convert_unwritten_extents(inode, offset, size); | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3639 | if (ret < 0) { | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3640 | printk(KERN_EMERG "%s: failed to convert unwritten " | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3641 | "extents to written extents, error is %d," | 
|  | 3642 | " io is still on inode %lu aio dio list\n", | 
|  | 3643 | __func__, ret, inode->i_ino); | 
|  | 3644 | return ret; | 
|  | 3645 | } | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3646 |  | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3647 | /* clear the DIO AIO unwritten flag */ | 
|  | 3648 | io->flag = 0; | 
|  | 3649 | return ret; | 
|  | 3650 | } | 
| Jiaying Zhang | c7064ef | 2010-03-02 13:28:44 -0500 | [diff] [blame] | 3651 |  | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3652 | /* | 
|  | 3653 | * work on completed aio dio IO, to convert unwritten extents to written extents | 
|  | 3654 | */ | 
| Jiaying Zhang | c7064ef | 2010-03-02 13:28:44 -0500 | [diff] [blame] | 3655 | static void ext4_end_io_work(struct work_struct *work) | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3656 | { | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3657 | ext4_io_end_t		*io = container_of(work, ext4_io_end_t, work); | 
|  | 3658 | struct inode		*inode = io->inode; | 
|  | 3659 | struct ext4_inode_info	*ei = EXT4_I(inode); | 
|  | 3660 | unsigned long		flags; | 
|  | 3661 | int			ret; | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3662 |  | 
|  | 3663 | mutex_lock(&inode->i_mutex); | 
| Jiaying Zhang | c7064ef | 2010-03-02 13:28:44 -0500 | [diff] [blame] | 3664 | ret = ext4_end_io_nolock(io); | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3665 | if (ret < 0) { | 
|  | 3666 | mutex_unlock(&inode->i_mutex); | 
|  | 3667 | return; | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3668 | } | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3669 |  | 
|  | 3670 | spin_lock_irqsave(&ei->i_completed_io_lock, flags); | 
|  | 3671 | if (!list_empty(&io->list)) | 
|  | 3672 | list_del_init(&io->list); | 
|  | 3673 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3674 | mutex_unlock(&inode->i_mutex); | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3675 | ext4_free_io_end(io); | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3676 | } | 
| Jiaying Zhang | c7064ef | 2010-03-02 13:28:44 -0500 | [diff] [blame] | 3677 |  | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3678 | /* | 
|  | 3679 | * This function is called from ext4_sync_file(). | 
|  | 3680 | * | 
| Jiaying Zhang | c7064ef | 2010-03-02 13:28:44 -0500 | [diff] [blame] | 3681 | * When IO is completed, the work to convert unwritten extents to | 
|  | 3682 | * written is queued on a workqueue but may not get immediately | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3683 | * scheduled. When fsync is called, we need to ensure the | 
|  | 3684 | * conversion is complete before fsync returns. | 
| Jiaying Zhang | c7064ef | 2010-03-02 13:28:44 -0500 | [diff] [blame] | 3685 | * The inode keeps track of a list of pending/completed IO that | 
|  | 3686 | * might need to do the conversion. This function walks through | 
|  | 3687 | * the list and converts the related unwritten extents for completed IO | 
|  | 3688 | * to written. | 
|  | 3689 | * The function returns 0 on success, or a negative error code if a conversion fails. | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3690 | */ | 
| Jiaying Zhang | c7064ef | 2010-03-02 13:28:44 -0500 | [diff] [blame] | 3691 | int flush_completed_IO(struct inode *inode) | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3692 | { | 
|  | 3693 | ext4_io_end_t *io; | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3694 | struct ext4_inode_info *ei = EXT4_I(inode); | 
|  | 3695 | unsigned long flags; | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3696 | int ret = 0; | 
|  | 3697 | int ret2 = 0; | 
|  | 3698 |  | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3699 | if (list_empty(&ei->i_completed_io_list)) | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3700 | return ret; | 
|  | 3701 |  | 
| Jiaying Zhang | c7064ef | 2010-03-02 13:28:44 -0500 | [diff] [blame] | 3702 | dump_completed_IO(inode); | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3703 | spin_lock_irqsave(&ei->i_completed_io_lock, flags); | 
|  | 3704 | while (!list_empty(&ei->i_completed_io_list)){ | 
|  | 3705 | io = list_entry(ei->i_completed_io_list.next, | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3706 | ext4_io_end_t, list); | 
|  | 3707 | /* | 
| Jiaying Zhang | c7064ef | 2010-03-02 13:28:44 -0500 | [diff] [blame] | 3708 | * Calling ext4_end_io_nolock() to convert completed | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3709 | * IO to written. | 
|  | 3710 | * | 
|  | 3711 | * When ext4_sync_file() is called, run_queue() may already be | 
|  | 3712 | * about to flush the work corresponding to this io structure. | 
|  | 3713 | * It will be upset if it finds that the io structure related | 
|  | 3714 | * to the work to be scheduled has been freed. | 
|  | 3715 | * | 
|  | 3716 | * Thus we need to keep the io structure valid here after the | 
|  | 3717 | * conversion has finished. The io structure has a flag to | 
|  | 3718 | * avoid double conversion from both fsync and the background | 
|  | 3719 | * workqueue. | 
|  | 3720 | */ | 
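|  |  | /* | 
|  |  | * ext4_end_io_nolock() may sleep (the conversion starts a journal | 
|  |  | * transaction), so drop the spinlock across the call and retake | 
|  |  | * it before looking at the list again. | 
|  |  | */ | 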
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3721 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); | 
| Jiaying Zhang | c7064ef | 2010-03-02 13:28:44 -0500 | [diff] [blame] | 3722 | ret = ext4_end_io_nolock(io); | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3723 | spin_lock_irqsave(&ei->i_completed_io_lock, flags); | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3724 | if (ret < 0) | 
|  | 3725 | ret2 = ret; | 
|  | 3726 | else | 
|  | 3727 | list_del_init(&io->list); | 
|  | 3728 | } | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3729 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3730 | return (ret2 < 0) ? ret2 : 0; | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3731 | } | 
|  | 3732 |  | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3733 | static ext4_io_end_t *ext4_init_io_end (struct inode *inode, gfp_t flags) | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3734 | { | 
|  | 3735 | ext4_io_end_t *io = NULL; | 
|  | 3736 |  | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3737 | io = kmalloc(sizeof(*io), flags); | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3738 |  | 
|  | 3739 | if (io) { | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3740 | igrab(inode); | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3741 | io->inode = inode; | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3742 | io->flag = 0; | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3743 | io->offset = 0; | 
|  | 3744 | io->size = 0; | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3745 | io->page = NULL; | 
| Jiaying Zhang | c7064ef | 2010-03-02 13:28:44 -0500 | [diff] [blame] | 3746 | INIT_WORK(&io->work, ext4_end_io_work); | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3747 | INIT_LIST_HEAD(&io->list); | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3748 | } | 
|  | 3749 |  | 
|  | 3750 | return io; | 
|  | 3751 | } | 
|  | 3752 |  | 
|  | 3753 | static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, | 
|  | 3754 | ssize_t size, void *private) | 
|  | 3755 | { | 
|  | 3756 | ext4_io_end_t *io_end = iocb->private; | 
|  | 3757 | struct workqueue_struct *wq; | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3758 | unsigned long flags; | 
|  | 3759 | struct ext4_inode_info *ei; | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3760 |  | 
| Mingming | 4b70df1 | 2009-11-03 14:44:54 -0500 | [diff] [blame] | 3761 | /* if not async direct IO or dio with 0 bytes write, just return */ | 
|  | 3762 | if (!io_end || !size) | 
|  | 3763 | return; | 
|  | 3764 |  | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3765 | ext_debug("ext4_end_io_dio(): io_end 0x%p " | 
|  | 3766 | "for inode %lu, iocb 0x%p, offset %llu, size %llu\n", | 
|  | 3767 | iocb->private, io_end->inode->i_ino, iocb, offset, | 
|  | 3768 | size); | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3769 |  | 
|  | 3770 | /* if not aio dio with unwritten extents, just free io and return */ | 
| Jiaying Zhang | c7064ef | 2010-03-02 13:28:44 -0500 | [diff] [blame] | 3771 | if (io_end->flag != EXT4_IO_UNWRITTEN){ | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3772 | ext4_free_io_end(io_end); | 
|  | 3773 | iocb->private = NULL; | 
|  | 3774 | return; | 
|  | 3775 | } | 
|  | 3776 |  | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3777 | io_end->offset = offset; | 
|  | 3778 | io_end->size = size; | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3779 | io_end->flag = EXT4_IO_UNWRITTEN; | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3780 | wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq; | 
|  | 3781 |  | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3782 | /* queue the work to convert unwritten extents to written */ | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3783 | queue_work(wq, &io_end->work); | 
|  | 3784 |  | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3785 | /* Add the io_end to per-inode completed aio dio list*/ | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3786 | ei = EXT4_I(io_end->inode); | 
|  | 3787 | spin_lock_irqsave(&ei->i_completed_io_lock, flags); | 
|  | 3788 | list_add_tail(&io_end->list, &ei->i_completed_io_list); | 
|  | 3789 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3790 | iocb->private = NULL; | 
|  | 3791 | } | 
| Jiaying Zhang | c7064ef | 2010-03-02 13:28:44 -0500 | [diff] [blame] | 3792 |  | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3793 | static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate) | 
|  | 3794 | { | 
|  | 3795 | ext4_io_end_t *io_end = bh->b_private; | 
|  | 3796 | struct workqueue_struct *wq; | 
|  | 3797 | struct inode *inode; | 
|  | 3798 | unsigned long flags; | 
|  | 3799 |  | 
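|  |  | /* | 
|  |  | * Only buffers that were written into an unwritten (uninit) extent | 
|  |  | * need conversion; anything else just completes as a normal | 
|  |  | * async write below. | 
|  |  | */ | 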
|  | 3800 | if (!test_clear_buffer_uninit(bh) || !io_end) | 
|  | 3801 | goto out; | 
|  | 3802 |  | 
|  | 3803 | if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) { | 
|  | 3804 | printk("sb umounted, discard end_io request for inode %lu\n", | 
|  | 3805 | io_end->inode->i_ino); | 
|  | 3806 | ext4_free_io_end(io_end); | 
|  | 3807 | goto out; | 
|  | 3808 | } | 
|  | 3809 |  | 
|  | 3810 | io_end->flag = EXT4_IO_UNWRITTEN; | 
|  | 3811 | inode = io_end->inode; | 
|  | 3812 |  | 
|  | 3813 | /* Add the io_end to per-inode completed io list*/ | 
|  | 3814 | spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags); | 
|  | 3815 | list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list); | 
|  | 3816 | spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags); | 
|  | 3817 |  | 
|  | 3818 | wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq; | 
|  | 3819 | /* queue the work to convert unwritten extents to written */ | 
|  | 3820 | queue_work(wq, &io_end->work); | 
|  | 3821 | out: | 
|  | 3822 | bh->b_private = NULL; | 
|  | 3823 | bh->b_end_io = NULL; | 
|  | 3824 | clear_buffer_uninit(bh); | 
|  | 3825 | end_buffer_async_write(bh, uptodate); | 
|  | 3826 | } | 
|  | 3827 |  | 
|  | 3828 | static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode) | 
|  | 3829 | { | 
|  | 3830 | ext4_io_end_t *io_end; | 
|  | 3831 | struct page *page = bh->b_page; | 
|  | 3832 | loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT; | 
|  | 3833 | size_t size = bh->b_size; | 
|  | 3834 |  | 
|  | 3835 | retry: | 
|  | 3836 | io_end = ext4_init_io_end(inode, GFP_ATOMIC); | 
|  | 3837 | if (!io_end) { | 
|  | 3838 | if (printk_ratelimit()) | 
|  | 3839 | printk(KERN_WARNING "%s: allocation failed\n", __func__); | 
|  | 3840 | schedule(); | 
|  | 3841 | goto retry; | 
|  | 3842 | } | 
|  | 3843 | io_end->offset = offset; | 
|  | 3844 | io_end->size = size; | 
|  | 3845 | /* | 
|  | 3846 | * We need to hold a reference to the page to make sure it | 
|  | 3847 | * doesn't get evicted before ext4_end_io_work() has a chance | 
|  | 3848 | * to convert the extent from unwritten to written. | 
|  | 3849 | */ | 
|  | 3850 | io_end->page = page; | 
|  | 3851 | get_page(io_end->page); | 
|  | 3852 |  | 
|  | 3853 | bh->b_private = io_end; | 
|  | 3854 | bh->b_end_io = ext4_end_io_buffer_write; | 
|  | 3855 | return 0; | 
|  | 3856 | } | 
|  | 3857 |  | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3858 | /* | 
|  | 3859 | * For ext4 extent files, ext4 will do direct-io write to holes, | 
|  | 3860 | * preallocated extents, and writes that extend the file, with no need to | 
|  | 3861 | * fall back to buffered IO. | 
|  | 3862 | * | 
|  | 3863 | * For holes, we allocate those blocks and mark them as uninitialized. | 
|  | 3864 | * If those blocks were preallocated, we make sure they are split, but | 
|  | 3865 | * still keep the range to write as uninitialized. | 
|  | 3866 | * | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3867 | * The unwritten extents will be converted to written when DIO is completed. | 
|  | 3868 | * For async direct IO, since the IO may still be pending when we return, we | 
|  | 3869 | * set up an end_io callback function, which will do the conversion | 
|  | 3870 | * when the async direct IO is completed. | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3871 | * | 
|  | 3872 | * If the O_DIRECT write will extend the file then add this inode to the | 
|  | 3873 | * orphan list.  So recovery will truncate it back to the original size | 
|  | 3874 | * if the machine crashes during the write. | 
|  | 3875 | * | 
|  | 3876 | */ | 
|  | 3877 | static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, | 
|  | 3878 | const struct iovec *iov, loff_t offset, | 
|  | 3879 | unsigned long nr_segs) | 
|  | 3880 | { | 
|  | 3881 | struct file *file = iocb->ki_filp; | 
|  | 3882 | struct inode *inode = file->f_mapping->host; | 
|  | 3883 | ssize_t ret; | 
|  | 3884 | size_t count = iov_length(iov, nr_segs); | 
|  | 3885 |  | 
|  | 3886 | loff_t final_size = offset + count; | 
|  | 3887 | if (rw == WRITE && final_size <= inode->i_size) { | 
|  | 3888 | /* | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3889 | * We can do direct writes to holes and fallocated extents. | 
|  | 3890 | * | 
|  | 3891 | * Allocated blocks to fill the hole are marked as uninitialized | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3892 | * to prevent a parallel buffered read from exposing the stale data | 
|  | 3893 | * before DIO completes the data IO. | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3894 | * | 
|  | 3895 | * As for previously fallocated extents, ext4's get_block | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3896 | * will simply mark the buffer mapped but still | 
|  | 3897 | * keep the extents uninitialized. | 
|  | 3898 | * | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3899 | * For the non-AIO case, we will convert those unwritten extents | 
|  | 3900 | * to written after returning from blockdev_direct_IO. | 
|  | 3901 | * | 
|  | 3902 | * For async DIO, the conversion needs to be deferred until | 
|  | 3903 | * the IO is completed. The ext4 end_io callback function | 
|  | 3904 | * will be called to take care of the conversion work. | 
|  | 3905 | * Here, for the async case, we allocate an io_end structure to | 
|  | 3906 | * hook onto the iocb. | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3907 | */ | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3908 | iocb->private = NULL; | 
|  | 3909 | EXT4_I(inode)->cur_aio_dio = NULL; | 
|  | 3910 | if (!is_sync_kiocb(iocb)) { | 
| Jiaying Zhang | 744692d | 2010-03-04 16:14:02 -0500 | [diff] [blame] | 3911 | iocb->private = ext4_init_io_end(inode, GFP_NOFS); | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3912 | if (!iocb->private) | 
|  | 3913 | return -ENOMEM; | 
|  | 3914 | /* | 
|  | 3915 | * we save the io structure for the current async | 
|  | 3916 | * direct IO, so that ext4_get_blocks() can later | 
|  | 3917 | * flag in the io structure whether there are | 
|  | 3918 | * unwritten extents that need to be converted | 
|  | 3919 | * when the IO is completed. | 
|  | 3920 | */ | 
|  | 3921 | EXT4_I(inode)->cur_aio_dio = iocb->private; | 
|  | 3922 | } | 
|  | 3923 |  | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3924 | ret = blockdev_direct_IO(rw, iocb, inode, | 
|  | 3925 | inode->i_sb->s_bdev, iov, | 
|  | 3926 | offset, nr_segs, | 
| Jiaying Zhang | c7064ef | 2010-03-02 13:28:44 -0500 | [diff] [blame] | 3927 | ext4_get_block_write, | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3928 | ext4_end_io_dio); | 
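|  |  | /* | 
|  |  | * cur_aio_dio was only needed while ext4_get_blocks() ran under | 
|  |  | * blockdev_direct_IO() above; clear it now that submission is done. | 
|  |  | */ | 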
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3929 | if (iocb->private) | 
|  | 3930 | EXT4_I(inode)->cur_aio_dio = NULL; | 
|  | 3931 | /* | 
|  | 3932 | * The io_end structure takes a reference to the inode; that | 
|  | 3933 | * structure needs to be destroyed and the reference to the | 
|  | 3934 | * inode needs to be dropped when the IO is complete, even | 
|  | 3935 | * for a 0 byte write or a failure. | 
|  | 3936 | * | 
|  | 3937 | * In the successful AIO DIO case, the io_end structure will be | 
|  | 3938 | * destroyed and the reference to the inode will be dropped | 
|  | 3939 | * after the end_io callback function is called. | 
|  | 3940 | * | 
|  | 3941 | * In the case of a 0 byte write or an error, since the | 
|  | 3942 | * VFS direct IO won't invoke the end_io callback function, | 
|  | 3943 | * we need to free the end_io structure here. | 
|  | 3944 | */ | 
|  | 3945 | if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) { | 
|  | 3946 | ext4_free_io_end(iocb->private); | 
|  | 3947 | iocb->private = NULL; | 
| Theodore Ts'o | 19f5fb7 | 2010-01-24 14:34:07 -0500 | [diff] [blame] | 3948 | } else if (ret > 0 && ext4_test_inode_state(inode, | 
|  | 3949 | EXT4_STATE_DIO_UNWRITTEN)) { | 
| Mingming | 109f556 | 2009-11-10 10:48:08 -0500 | [diff] [blame] | 3950 | int err; | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3951 | /* | 
|  | 3952 | * For the non-AIO case, since the IO is already | 
|  | 3953 | * completed, we can do the conversion right here | 
|  | 3954 | */ | 
| Mingming | 109f556 | 2009-11-10 10:48:08 -0500 | [diff] [blame] | 3955 | err = ext4_convert_unwritten_extents(inode, | 
|  | 3956 | offset, ret); | 
|  | 3957 | if (err < 0) | 
|  | 3958 | ret = err; | 
| Theodore Ts'o | 19f5fb7 | 2010-01-24 14:34:07 -0500 | [diff] [blame] | 3959 | ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); | 
| Mingming | 109f556 | 2009-11-10 10:48:08 -0500 | [diff] [blame] | 3960 | } | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3961 | return ret; | 
|  | 3962 | } | 
| Mingming Cao | 8d5d02e | 2009-09-28 15:48:29 -0400 | [diff] [blame] | 3963 |  | 
|  | 3964 | /* for writes beyond the end of file, we fall back to the old way */ | 
| Mingming Cao | 4c0425f | 2009-09-28 15:48:41 -0400 | [diff] [blame] | 3965 | return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs); | 
|  | 3966 | } | 
|  | 3967 |  | 
|  | 3968 | static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb, | 
|  | 3969 | const struct iovec *iov, loff_t offset, | 
|  | 3970 | unsigned long nr_segs) | 
|  | 3971 | { | 
|  | 3972 | struct file *file = iocb->ki_filp; | 
|  | 3973 | struct inode *inode = file->f_mapping->host; | 
|  | 3974 |  | 
|  | 3975 | if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) | 
|  | 3976 | return ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs); | 
|  | 3977 |  | 
|  | 3978 | return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs); | 
|  | 3979 | } | 
|  | 3980 |  | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3981 | /* | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 3982 | * Pages can be marked dirty completely asynchronously from ext4's journalling | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3983 | * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do | 
|  | 3984 | * much here because ->set_page_dirty is called under VFS locks.  The page is | 
|  | 3985 | * not necessarily locked. | 
|  | 3986 | * | 
|  | 3987 | * We cannot just dirty the page and leave attached buffers clean, because the | 
|  | 3988 | * buffers' dirty state is "definitive".  We cannot just set the buffers dirty | 
|  | 3989 | * or jbddirty because all the journalling code will explode. | 
|  | 3990 | * | 
|  | 3991 | * So what we do is to mark the page "pending dirty" and next time writepage | 
|  | 3992 | * is called, propagate that into the buffers appropriately. | 
|  | 3993 | */ | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 3994 | static int ext4_journalled_set_page_dirty(struct page *page) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 3995 | { | 
|  | 3996 | SetPageChecked(page); | 
|  | 3997 | return __set_page_dirty_nobuffers(page); | 
|  | 3998 | } | 
|  | 3999 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4000 | static const struct address_space_operations ext4_ordered_aops = { | 
| Hisashi Hifumi | 8ab22b9 | 2008-07-28 15:46:36 -0700 | [diff] [blame] | 4001 | .readpage		= ext4_readpage, | 
|  | 4002 | .readpages		= ext4_readpages, | 
| Aneesh Kumar K.V | 43ce1d2 | 2009-06-14 17:58:45 -0400 | [diff] [blame] | 4003 | .writepage		= ext4_writepage, | 
| Hisashi Hifumi | 8ab22b9 | 2008-07-28 15:46:36 -0700 | [diff] [blame] | 4004 | .sync_page		= block_sync_page, | 
|  | 4005 | .write_begin		= ext4_write_begin, | 
|  | 4006 | .write_end		= ext4_ordered_write_end, | 
|  | 4007 | .bmap			= ext4_bmap, | 
|  | 4008 | .invalidatepage		= ext4_invalidatepage, | 
|  | 4009 | .releasepage		= ext4_releasepage, | 
|  | 4010 | .direct_IO		= ext4_direct_IO, | 
|  | 4011 | .migratepage		= buffer_migrate_page, | 
|  | 4012 | .is_partially_uptodate  = block_is_partially_uptodate, | 
| Andi Kleen | aa261f5 | 2009-09-16 11:50:16 +0200 | [diff] [blame] | 4013 | .error_remove_page	= generic_error_remove_page, | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4014 | }; | 
|  | 4015 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4016 | static const struct address_space_operations ext4_writeback_aops = { | 
| Hisashi Hifumi | 8ab22b9 | 2008-07-28 15:46:36 -0700 | [diff] [blame] | 4017 | .readpage		= ext4_readpage, | 
|  | 4018 | .readpages		= ext4_readpages, | 
| Aneesh Kumar K.V | 43ce1d2 | 2009-06-14 17:58:45 -0400 | [diff] [blame] | 4019 | .writepage		= ext4_writepage, | 
| Hisashi Hifumi | 8ab22b9 | 2008-07-28 15:46:36 -0700 | [diff] [blame] | 4020 | .sync_page		= block_sync_page, | 
|  | 4021 | .write_begin		= ext4_write_begin, | 
|  | 4022 | .write_end		= ext4_writeback_write_end, | 
|  | 4023 | .bmap			= ext4_bmap, | 
|  | 4024 | .invalidatepage		= ext4_invalidatepage, | 
|  | 4025 | .releasepage		= ext4_releasepage, | 
|  | 4026 | .direct_IO		= ext4_direct_IO, | 
|  | 4027 | .migratepage		= buffer_migrate_page, | 
|  | 4028 | .is_partially_uptodate  = block_is_partially_uptodate, | 
| Andi Kleen | aa261f5 | 2009-09-16 11:50:16 +0200 | [diff] [blame] | 4029 | .error_remove_page	= generic_error_remove_page, | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4030 | }; | 
|  | 4031 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4032 | static const struct address_space_operations ext4_journalled_aops = { | 
| Hisashi Hifumi | 8ab22b9 | 2008-07-28 15:46:36 -0700 | [diff] [blame] | 4033 | .readpage		= ext4_readpage, | 
|  | 4034 | .readpages		= ext4_readpages, | 
| Aneesh Kumar K.V | 43ce1d2 | 2009-06-14 17:58:45 -0400 | [diff] [blame] | 4035 | .writepage		= ext4_writepage, | 
| Hisashi Hifumi | 8ab22b9 | 2008-07-28 15:46:36 -0700 | [diff] [blame] | 4036 | .sync_page		= block_sync_page, | 
|  | 4037 | .write_begin		= ext4_write_begin, | 
|  | 4038 | .write_end		= ext4_journalled_write_end, | 
|  | 4039 | .set_page_dirty		= ext4_journalled_set_page_dirty, | 
|  | 4040 | .bmap			= ext4_bmap, | 
|  | 4041 | .invalidatepage		= ext4_invalidatepage, | 
|  | 4042 | .releasepage		= ext4_releasepage, | 
|  | 4043 | .is_partially_uptodate  = block_is_partially_uptodate, | 
| Andi Kleen | aa261f5 | 2009-09-16 11:50:16 +0200 | [diff] [blame] | 4044 | .error_remove_page	= generic_error_remove_page, | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4045 | }; | 
|  | 4046 |  | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 4047 | static const struct address_space_operations ext4_da_aops = { | 
| Hisashi Hifumi | 8ab22b9 | 2008-07-28 15:46:36 -0700 | [diff] [blame] | 4048 | .readpage		= ext4_readpage, | 
|  | 4049 | .readpages		= ext4_readpages, | 
| Aneesh Kumar K.V | 43ce1d2 | 2009-06-14 17:58:45 -0400 | [diff] [blame] | 4050 | .writepage		= ext4_writepage, | 
| Hisashi Hifumi | 8ab22b9 | 2008-07-28 15:46:36 -0700 | [diff] [blame] | 4051 | .writepages		= ext4_da_writepages, | 
|  | 4052 | .sync_page		= block_sync_page, | 
|  | 4053 | .write_begin		= ext4_da_write_begin, | 
|  | 4054 | .write_end		= ext4_da_write_end, | 
|  | 4055 | .bmap			= ext4_bmap, | 
|  | 4056 | .invalidatepage		= ext4_da_invalidatepage, | 
|  | 4057 | .releasepage		= ext4_releasepage, | 
|  | 4058 | .direct_IO		= ext4_direct_IO, | 
|  | 4059 | .migratepage		= buffer_migrate_page, | 
|  | 4060 | .is_partially_uptodate  = block_is_partially_uptodate, | 
| Andi Kleen | aa261f5 | 2009-09-16 11:50:16 +0200 | [diff] [blame] | 4061 | .error_remove_page	= generic_error_remove_page, | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 4062 | }; | 
|  | 4063 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4064 | void ext4_set_aops(struct inode *inode) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4065 | { | 
| Aneesh Kumar K.V | cd1aac3 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 4066 | if (ext4_should_order_data(inode) && | 
|  | 4067 | test_opt(inode->i_sb, DELALLOC)) | 
|  | 4068 | inode->i_mapping->a_ops = &ext4_da_aops; | 
|  | 4069 | else if (ext4_should_order_data(inode)) | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4070 | inode->i_mapping->a_ops = &ext4_ordered_aops; | 
| Alex Tomas | 6476924 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 4071 | else if (ext4_should_writeback_data(inode) && | 
|  | 4072 | test_opt(inode->i_sb, DELALLOC)) | 
|  | 4073 | inode->i_mapping->a_ops = &ext4_da_aops; | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4074 | else if (ext4_should_writeback_data(inode)) | 
|  | 4075 | inode->i_mapping->a_ops = &ext4_writeback_aops; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4076 | else | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4077 | inode->i_mapping->a_ops = &ext4_journalled_aops; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4078 | } | 
|  | 4079 |  | 
|  | 4080 | /* | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4081 | * ext4_block_truncate_page() zeroes out a mapping from file offset `from' | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4082 | * up to the end of the block which corresponds to `from'. | 
|  | 4083 | * This is required during truncate. We need to physically zero the tail end | 
|  | 4084 | * of that block so it doesn't yield old data if the file is later grown. | 
|  | 4085 | */ | 
| Jan Kara | cf108bc | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 4086 | int ext4_block_truncate_page(handle_t *handle, | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4087 | struct address_space *mapping, loff_t from) | 
|  | 4088 | { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4089 | ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4090 | unsigned offset = from & (PAGE_CACHE_SIZE-1); | 
| Aneesh Kumar K.V | 725d26d | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 4091 | unsigned blocksize, length, pos; | 
|  | 4092 | ext4_lblk_t iblock; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4093 | struct inode *inode = mapping->host; | 
|  | 4094 | struct buffer_head *bh; | 
| Jan Kara | cf108bc | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 4095 | struct page *page; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4096 | int err = 0; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4097 |  | 
| Theodore Ts'o | f4a0101 | 2009-07-05 22:08:16 -0400 | [diff] [blame] | 4098 | page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT, | 
|  | 4099 | mapping_gfp_mask(mapping) & ~__GFP_FS); | 
| Jan Kara | cf108bc | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 4100 | if (!page) | 
|  | 4101 | return -EINVAL; | 
|  | 4102 |  | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4103 | blocksize = inode->i_sb->s_blocksize; | 
|  | 4104 | length = blocksize - (offset & (blocksize - 1)); | 
|  | 4105 | iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); | 
|  | 4106 |  | 
|  | 4107 | /* | 
|  | 4108 | * For "nobh" option,  we can only work if we don't need to | 
|  | 4109 | * read-in the page - otherwise we create buffers to do the IO. | 
|  | 4110 | */ | 
|  | 4111 | if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) && | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4112 | ext4_should_writeback_data(inode) && PageUptodate(page)) { | 
| Christoph Lameter | eebd2aa | 2008-02-04 22:28:29 -0800 | [diff] [blame] | 4113 | zero_user(page, offset, length); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4114 | set_page_dirty(page); | 
|  | 4115 | goto unlock; | 
|  | 4116 | } | 
|  | 4117 |  | 
|  | 4118 | if (!page_has_buffers(page)) | 
|  | 4119 | create_empty_buffers(page, blocksize, 0); | 
|  | 4120 |  | 
|  | 4121 | /* Find the buffer that contains "offset" */ | 
|  | 4122 | bh = page_buffers(page); | 
|  | 4123 | pos = blocksize; | 
|  | 4124 | while (offset >= pos) { | 
|  | 4125 | bh = bh->b_this_page; | 
|  | 4126 | iblock++; | 
|  | 4127 | pos += blocksize; | 
|  | 4128 | } | 
|  | 4129 |  | 
|  | 4130 | err = 0; | 
|  | 4131 | if (buffer_freed(bh)) { | 
|  | 4132 | BUFFER_TRACE(bh, "freed: skip"); | 
|  | 4133 | goto unlock; | 
|  | 4134 | } | 
|  | 4135 |  | 
|  | 4136 | if (!buffer_mapped(bh)) { | 
|  | 4137 | BUFFER_TRACE(bh, "unmapped"); | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4138 | ext4_get_block(inode, iblock, bh, 0); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4139 | /* unmapped? It's a hole - nothing to do */ | 
|  | 4140 | if (!buffer_mapped(bh)) { | 
|  | 4141 | BUFFER_TRACE(bh, "still unmapped"); | 
|  | 4142 | goto unlock; | 
|  | 4143 | } | 
|  | 4144 | } | 
|  | 4145 |  | 
|  | 4146 | /* Ok, it's mapped. Make sure it's up-to-date */ | 
|  | 4147 | if (PageUptodate(page)) | 
|  | 4148 | set_buffer_uptodate(bh); | 
|  | 4149 |  | 
|  | 4150 | if (!buffer_uptodate(bh)) { | 
|  | 4151 | err = -EIO; | 
|  | 4152 | ll_rw_block(READ, 1, &bh); | 
|  | 4153 | wait_on_buffer(bh); | 
|  | 4154 | /* Uhhuh. Read error. Complain and punt. */ | 
|  | 4155 | if (!buffer_uptodate(bh)) | 
|  | 4156 | goto unlock; | 
|  | 4157 | } | 
|  | 4158 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4159 | if (ext4_should_journal_data(inode)) { | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4160 | BUFFER_TRACE(bh, "get write access"); | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4161 | err = ext4_journal_get_write_access(handle, bh); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4162 | if (err) | 
|  | 4163 | goto unlock; | 
|  | 4164 | } | 
|  | 4165 |  | 
| Christoph Lameter | eebd2aa | 2008-02-04 22:28:29 -0800 | [diff] [blame] | 4166 | zero_user(page, offset, length); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4167 |  | 
|  | 4168 | BUFFER_TRACE(bh, "zeroed end of block"); | 
|  | 4169 |  | 
|  | 4170 | err = 0; | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4171 | if (ext4_should_journal_data(inode)) { | 
| Frank Mayhar | 0390131 | 2009-01-07 00:06:22 -0500 | [diff] [blame] | 4172 | err = ext4_handle_dirty_metadata(handle, inode, bh); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4173 | } else { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4174 | if (ext4_should_order_data(inode)) | 
| Jan Kara | 678aaf4 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 4175 | err = ext4_jbd2_file_inode(handle, inode); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4176 | mark_buffer_dirty(bh); | 
|  | 4177 | } | 
|  | 4178 |  | 
|  | 4179 | unlock: | 
|  | 4180 | unlock_page(page); | 
|  | 4181 | page_cache_release(page); | 
|  | 4182 | return err; | 
|  | 4183 | } | 
|  | 4184 |  | 
|  | 4185 | /* | 
|  | 4186 | * Probably it should be a library function... search for first non-zero word | 
|  | 4187 | * or memcmp with zero_page, whatever is better for particular architecture. | 
|  | 4188 | * Linus? | 
|  | 4189 | */ | 
|  | 4190 | static inline int all_zeroes(__le32 *p, __le32 *q) | 
|  | 4191 | { | 
|  | 4192 | while (p < q) | 
|  | 4193 | if (*p++) | 
|  | 4194 | return 0; | 
|  | 4195 | return 1; | 
|  | 4196 | } | 
|  | 4197 |  | 
|  | 4198 | /** | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4199 | *	ext4_find_shared - find the indirect blocks for partial truncation. | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4200 | *	@inode:	  inode in question | 
|  | 4201 | *	@depth:	  depth of the affected branch | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4202 | *	@offsets: offsets of pointers in that branch (see ext4_block_to_path) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4203 | *	@chain:	  place to store the pointers to partial indirect blocks | 
|  | 4204 | *	@top:	  place to the (detached) top of branch | 
|  | 4205 | * | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4206 | *	This is a helper function used by ext4_truncate(). | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4207 | * | 
|  | 4208 | *	When we do truncate() we may have to clean the ends of several | 
|  | 4209 | *	indirect blocks but leave the blocks themselves alive. Block is | 
|  | 4210 | *	partially truncated if some data below the new i_size is referenced | 
|  | 4211 | *	from it (and it is on the path to the first completely truncated | 
|  | 4212 | *	data block, indeed).  We have to free the top of that path along | 
|  | 4213 | *	with everything to the right of the path. Since no allocation | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4214 | *	past the truncation point is possible until ext4_truncate() | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4215 | *	finishes, we may safely do the latter, but top of branch may | 
|  | 4216 | *	require special attention - pageout below the truncation point | 
|  | 4217 | *	might try to populate it. | 
|  | 4218 | * | 
|  | 4219 | *	We atomically detach the top of branch from the tree, store the | 
|  | 4220 | *	block number of its root in *@top, pointers to buffer_heads of | 
|  | 4221 | *	partially truncated blocks - in @chain[].bh and pointers to | 
|  | 4222 | *	their last elements that should not be removed - in | 
|  | 4223 | *	@chain[].p. Return value is the pointer to last filled element | 
|  | 4224 | *	of @chain. | 
|  | 4225 | * | 
|  | 4226 | *	The work left to caller to do the actual freeing of subtrees: | 
|  | 4227 | *		a) free the subtree starting from *@top | 
|  | 4228 | *		b) free the subtrees whose roots are stored in | 
|  | 4229 | *			(@chain[i].p+1 .. end of @chain[i].bh->b_data) | 
|  | 4230 | *		c) free the subtrees growing from the inode past the @chain[0]. | 
|  | 4231 | *			(no partially truncated stuff there).  */ | 
|  | 4232 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4233 | static Indirect *ext4_find_shared(struct inode *inode, int depth, | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 4234 | ext4_lblk_t offsets[4], Indirect chain[4], | 
|  | 4235 | __le32 *top) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4236 | { | 
|  | 4237 | Indirect *partial, *p; | 
|  | 4238 | int k, err; | 
|  | 4239 |  | 
|  | 4240 | *top = 0; | 
| Uwe Kleine-König | bf48aab | 2009-10-28 20:11:03 +0100 | [diff] [blame] | 4241 | /* Make k index the deepest non-null offset + 1 */ | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4242 | for (k = depth; k > 1 && !offsets[k-1]; k--) | 
|  | 4243 | ; | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4244 | partial = ext4_get_branch(inode, k, offsets, chain, &err); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4245 | /* Writer: pointers */ | 
|  | 4246 | if (!partial) | 
|  | 4247 | partial = chain + k-1; | 
|  | 4248 | /* | 
|  | 4249 | * If the branch acquired continuation since we've looked at it - | 
|  | 4250 | * fine, it should all survive and (new) top doesn't belong to us. | 
|  | 4251 | */ | 
|  | 4252 | if (!partial->key && *partial->p) | 
|  | 4253 | /* Writer: end */ | 
|  | 4254 | goto no_top; | 
| Theodore Ts'o | af5bc92 | 2008-09-08 22:25:24 -0400 | [diff] [blame] | 4255 | for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4256 | ; | 
|  | 4257 | /* | 
|  | 4258 | * OK, we've found the last block that must survive. The rest of our | 
|  | 4259 | * branch should be detached before unlocking. However, if that rest | 
|  | 4260 | * of branch is all ours and does not grow immediately from the inode | 
|  | 4261 | * it's easier to cheat and just decrement partial->p. | 
|  | 4262 | */ | 
|  | 4263 | if (p == chain + k - 1 && p > chain) { | 
|  | 4264 | p->p--; | 
|  | 4265 | } else { | 
|  | 4266 | *top = *p->p; | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4267 | /* Nope, don't do this in ext4.  Must leave the tree intact */ | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4268 | #if 0 | 
|  | 4269 | *p->p = 0; | 
|  | 4270 | #endif | 
|  | 4271 | } | 
|  | 4272 | /* Writer: end */ | 
|  | 4273 |  | 
| Theodore Ts'o | af5bc92 | 2008-09-08 22:25:24 -0400 | [diff] [blame] | 4274 | while (partial > p) { | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4275 | brelse(partial->bh); | 
|  | 4276 | partial--; | 
|  | 4277 | } | 
|  | 4278 | no_top: | 
|  | 4279 | return partial; | 
|  | 4280 | } | 
|  | 4281 |  | 
|  | 4282 | /* | 
|  | 4283 | * Zero a number of block pointers in either an inode or an indirect block. | 
|  | 4284 | * If we restart the transaction we must again get write access to the | 
|  | 4285 | * indirect block for further modification. | 
|  | 4286 | * | 
|  | 4287 | * We release `count' blocks on disk, but (last - first) may be greater | 
|  | 4288 | * than `count' because there can be holes in there. | 
|  | 4289 | */ | 
| Theodore Ts'o | 1f2acb6 | 2010-01-22 17:40:42 -0500 | [diff] [blame] | 4290 | static int ext4_clear_blocks(handle_t *handle, struct inode *inode, | 
|  | 4291 | struct buffer_head *bh, | 
|  | 4292 | ext4_fsblk_t block_to_free, | 
|  | 4293 | unsigned long count, __le32 *first, | 
|  | 4294 | __le32 *last) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4295 | { | 
|  | 4296 | __le32 *p; | 
| Theodore Ts'o | 1f2acb6 | 2010-01-22 17:40:42 -0500 | [diff] [blame] | 4297 | int	flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED; | 
| Theodore Ts'o | e636260 | 2009-11-23 07:17:05 -0500 | [diff] [blame] | 4298 |  | 
|  | 4299 | if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) | 
|  | 4300 | flags |= EXT4_FREE_BLOCKS_METADATA; | 
| Theodore Ts'o | 5068969 | 2009-11-23 07:17:34 -0500 | [diff] [blame] | 4301 |  | 
| Theodore Ts'o | 1f2acb6 | 2010-01-22 17:40:42 -0500 | [diff] [blame] | 4302 | if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free, | 
|  | 4303 | count)) { | 
| Eric Sandeen | 12062dd | 2010-02-15 14:19:27 -0500 | [diff] [blame] | 4304 | ext4_error(inode->i_sb, "inode #%lu: " | 
| Theodore Ts'o | 1f2acb6 | 2010-01-22 17:40:42 -0500 | [diff] [blame] | 4305 | "attempt to clear blocks %llu len %lu, invalid", | 
|  | 4306 | inode->i_ino, (unsigned long long) block_to_free, | 
|  | 4307 | count); | 
|  | 4308 | return 1; | 
|  | 4309 | } | 
|  | 4310 |  | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4311 | if (try_to_extend_transaction(handle, inode)) { | 
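|  |  | /* | 
|  |  | * The handle is running low on credits: flush the dirty metadata, | 
|  |  | * restart the transaction, and retake write access to the | 
|  |  | * indirect block before freeing more blocks. | 
|  |  | */ | 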
|  | 4312 | if (bh) { | 
| Frank Mayhar | 0390131 | 2009-01-07 00:06:22 -0500 | [diff] [blame] | 4313 | BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); | 
|  | 4314 | ext4_handle_dirty_metadata(handle, inode, bh); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4315 | } | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4316 | ext4_mark_inode_dirty(handle, inode); | 
| Jan Kara | 487caee | 2009-08-17 22:17:20 -0400 | [diff] [blame] | 4317 | ext4_truncate_restart_trans(handle, inode, | 
|  | 4318 | blocks_for_truncate(inode)); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4319 | if (bh) { | 
|  | 4320 | BUFFER_TRACE(bh, "retaking write access"); | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4321 | ext4_journal_get_write_access(handle, bh); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4322 | } | 
|  | 4323 | } | 
|  | 4324 |  | 
| Theodore Ts'o | e636260 | 2009-11-23 07:17:05 -0500 | [diff] [blame] | 4325 | for (p = first; p < last; p++) | 
|  | 4326 | *p = 0; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4327 |  | 
| Theodore Ts'o | e636260 | 2009-11-23 07:17:05 -0500 | [diff] [blame] | 4328 | ext4_free_blocks(handle, inode, 0, block_to_free, count, flags); | 
| Theodore Ts'o | 1f2acb6 | 2010-01-22 17:40:42 -0500 | [diff] [blame] | 4329 | return 0; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4330 | } | 
|  | 4331 |  | 
|  | 4332 | /** | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4333 | * ext4_free_data - free a list of data blocks | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4334 | * @handle:	handle for this transaction | 
|  | 4335 | * @inode:	inode we are dealing with | 
|  | 4336 | * @this_bh:	indirect buffer_head which contains *@first and *@last | 
|  | 4337 | * @first:	array of block numbers | 
|  | 4338 | * @last:	points immediately past the end of array | 
|  | 4339 | * | 
|  | 4340 | * We are freeing all blocks referenced from that array (numbers are stored as | 
|  | 4341 | * little-endian 32-bit) and updating @inode->i_blocks appropriately. | 
|  | 4342 | * | 
|  | 4343 | * We accumulate contiguous runs of blocks to free.  Conveniently, if these | 
|  | 4344 | * blocks are contiguous then releasing them at one time will only affect one | 
|  | 4345 | * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't | 
|  | 4346 | * actually use a lot of journal space. | 
|  | 4347 | * | 
|  | 4348 | * @this_bh will be %NULL if @first and @last point into the inode's direct | 
|  | 4349 | * block pointers. | 
|  | 4350 | */ | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4351 | static void ext4_free_data(handle_t *handle, struct inode *inode, | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4352 | struct buffer_head *this_bh, | 
|  | 4353 | __le32 *first, __le32 *last) | 
|  | 4354 | { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4355 | ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */ | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4356 | unsigned long count = 0;	    /* Number of blocks in the run */ | 
|  | 4357 | __le32 *block_to_free_p = NULL;	    /* Pointer into inode/ind | 
|  | 4358 | corresponding to | 
|  | 4359 | block_to_free */ | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4360 | ext4_fsblk_t nr;		    /* Current block # */ | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4361 | __le32 *p;			    /* Pointer into inode/ind | 
|  | 4362 | for current block */ | 
|  | 4363 | int err; | 
|  | 4364 |  | 
|  | 4365 | if (this_bh) {				/* For indirect block */ | 
|  | 4366 | BUFFER_TRACE(this_bh, "get_write_access"); | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4367 | err = ext4_journal_get_write_access(handle, this_bh); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4368 | /* Important: if we can't update the indirect pointers | 
|  | 4369 | * to the blocks, we can't free them. */ | 
|  | 4370 | if (err) | 
|  | 4371 | return; | 
|  | 4372 | } | 
|  | 4373 |  | 
|  | 4374 | for (p = first; p < last; p++) { | 
|  | 4375 | nr = le32_to_cpu(*p); | 
|  | 4376 | if (nr) { | 
|  | 4377 | /* accumulate blocks to free if they're contiguous */ | 
|  | 4378 | if (count == 0) { | 
|  | 4379 | block_to_free = nr; | 
|  | 4380 | block_to_free_p = p; | 
|  | 4381 | count = 1; | 
|  | 4382 | } else if (nr == block_to_free + count) { | 
|  | 4383 | count++; | 
|  | 4384 | } else { | 
| Theodore Ts'o | 1f2acb6 | 2010-01-22 17:40:42 -0500 | [diff] [blame] | 4385 | if (ext4_clear_blocks(handle, inode, this_bh, | 
|  | 4386 | block_to_free, count, | 
|  | 4387 | block_to_free_p, p)) | 
|  | 4388 | break; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4389 | block_to_free = nr; | 
|  | 4390 | block_to_free_p = p; | 
|  | 4391 | count = 1; | 
|  | 4392 | } | 
|  | 4393 | } | 
|  | 4394 | } | 
|  | 4395 |  | 
|  | 4396 | if (count > 0) | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4397 | ext4_clear_blocks(handle, inode, this_bh, block_to_free, | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4398 | count, block_to_free_p, p); | 
|  | 4399 |  | 
|  | 4400 | if (this_bh) { | 
| Frank Mayhar | 0390131 | 2009-01-07 00:06:22 -0500 | [diff] [blame] | 4401 | BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata"); | 
| Duane Griffin | 71dc8fb | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 4402 |  | 
|  | 4403 | /* | 
|  | 4404 | * The buffer head should have an attached journal head at this | 
|  | 4405 | * point. However, if the data is corrupted and an indirect | 
|  | 4406 | * block pointed to itself, it would have been detached when | 
|  | 4407 | * the block was cleared. Check for this instead of OOPSing. | 
|  | 4408 | */ | 
| Theodore Ts'o | e7f0796 | 2009-01-20 09:50:19 -0500 | [diff] [blame] | 4409 | if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh)) | 
| Frank Mayhar | 0390131 | 2009-01-07 00:06:22 -0500 | [diff] [blame] | 4410 | ext4_handle_dirty_metadata(handle, inode, this_bh); | 
| Duane Griffin | 71dc8fb | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 4411 | else | 
| Eric Sandeen | 12062dd | 2010-02-15 14:19:27 -0500 | [diff] [blame] | 4412 | ext4_error(inode->i_sb, | 
| Duane Griffin | 71dc8fb | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 4413 | "circular indirect block detected, " | 
|  | 4414 | "inode=%lu, block=%llu", | 
|  | 4415 | inode->i_ino, | 
|  | 4416 | (unsigned long long) this_bh->b_blocknr); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4417 | } | 
|  | 4418 | } | 
|  | 4419 |  | 
|  | 4420 | /** | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4421 | *	ext4_free_branches - free an array of branches | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4422 | *	@handle: JBD handle for this transaction | 
|  | 4423 | *	@inode:	inode we are dealing with | 
|  | 4424 | *	@parent_bh: the buffer_head which contains *@first and *@last | 
|  | 4425 | *	@first:	array of block numbers | 
|  | 4426 | *	@last:	pointer immediately past the end of array | 
|  | 4427 | *	@depth:	depth of the branches to free | 
|  | 4428 | * | 
|  | 4429 | *	We are freeing all blocks referred to from these branches (numbers are | 
|  | 4430 | *	stored as little-endian 32-bit) and updating @inode->i_blocks | 
|  | 4431 | *	appropriately. | 
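|  |  | * | 
|  |  | *	For example, @depth == 3 describes a triple-indirect tree: each | 
|  |  | *	recursive call drops one level, and once @depth reaches 0 the leaf | 
|  |  | *	pointers are handed to ext4_free_data(). | 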
|  | 4432 | */ | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4433 | static void ext4_free_branches(handle_t *handle, struct inode *inode, | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4434 | struct buffer_head *parent_bh, | 
|  | 4435 | __le32 *first, __le32 *last, int depth) | 
|  | 4436 | { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4437 | ext4_fsblk_t nr; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4438 | __le32 *p; | 
|  | 4439 |  | 
| Frank Mayhar | 0390131 | 2009-01-07 00:06:22 -0500 | [diff] [blame] | 4440 | if (ext4_handle_is_aborted(handle)) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4441 | return; | 
|  | 4442 |  | 
|  | 4443 | if (depth--) { | 
|  | 4444 | struct buffer_head *bh; | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4445 | int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4446 | p = last; | 
|  | 4447 | while (--p >= first) { | 
|  | 4448 | nr = le32_to_cpu(*p); | 
|  | 4449 | if (!nr) | 
|  | 4450 | continue;		/* A hole */ | 
|  | 4451 |  | 
| Theodore Ts'o | 1f2acb6 | 2010-01-22 17:40:42 -0500 | [diff] [blame] | 4452 | if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), | 
|  | 4453 | nr, 1)) { | 
| Eric Sandeen | 12062dd | 2010-02-15 14:19:27 -0500 | [diff] [blame] | 4454 | ext4_error(inode->i_sb, | 
| Theodore Ts'o | 1f2acb6 | 2010-01-22 17:40:42 -0500 | [diff] [blame] | 4455 | "indirect mapped block in inode " | 
|  | 4456 | "#%lu invalid (level %d, blk #%lu)", | 
|  | 4457 | inode->i_ino, depth, | 
|  | 4458 | (unsigned long) nr); | 
|  | 4459 | break; | 
|  | 4460 | } | 
|  | 4461 |  | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4462 | /* Go read the buffer for the next level down */ | 
|  | 4463 | bh = sb_bread(inode->i_sb, nr); | 
|  | 4464 |  | 
|  | 4465 | /* | 
|  | 4466 | * A read failure? Report error and clear slot | 
|  | 4467 | * (should be rare). | 
|  | 4468 | */ | 
|  | 4469 | if (!bh) { | 
| Eric Sandeen | 12062dd | 2010-02-15 14:19:27 -0500 | [diff] [blame] | 4470 | ext4_error(inode->i_sb, | 
| Mingming Cao | 2ae0210 | 2006-10-11 01:21:11 -0700 | [diff] [blame] | 4471 | "Read failure, inode=%lu, block=%llu", | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4472 | inode->i_ino, nr); | 
|  | 4473 | continue; | 
|  | 4474 | } | 
|  | 4475 |  | 
|  | 4476 | /* This zaps the entire block.  Bottom up. */ | 
|  | 4477 | BUFFER_TRACE(bh, "free child branches"); | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4478 | ext4_free_branches(handle, inode, bh, | 
| Theodore Ts'o | af5bc92 | 2008-09-08 22:25:24 -0400 | [diff] [blame] | 4479 | (__le32 *) bh->b_data, | 
|  | 4480 | (__le32 *) bh->b_data + addr_per_block, | 
|  | 4481 | depth); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4482 |  | 
|  | 4483 | /* | 
|  | 4484 | * We've probably journalled the indirect block several | 
|  | 4485 | * times during the truncate.  But it's no longer | 
|  | 4486 | * needed and we now drop it from the transaction via | 
| Mingming Cao | dab291a | 2006-10-11 01:21:01 -0700 | [diff] [blame] | 4487 | * jbd2_journal_revoke(). | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4488 | * | 
|  | 4489 | * That's easy if it's exclusively part of this | 
|  | 4490 | * transaction.  But if it's part of the committing | 
| Mingming Cao | dab291a | 2006-10-11 01:21:01 -0700 | [diff] [blame] | 4491 | * transaction then jbd2_journal_forget() will simply | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4492 | * brelse() it.  That means that if the underlying | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4493 | * block is reallocated in ext4_get_block(), | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4494 | * unmap_underlying_metadata() will find this block | 
|  | 4495 | * and will try to get rid of it.  damn, damn. | 
|  | 4496 | * | 
|  | 4497 | * If this block has already been committed to the | 
|  | 4498 | * journal, a revoke record will be written.  And | 
|  | 4499 | * revoke records must be emitted *before* clearing | 
|  | 4500 | * this block's bit in the bitmaps. | 
|  | 4501 | */ | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4502 | ext4_forget(handle, 1, inode, bh, bh->b_blocknr); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4503 |  | 
|  | 4504 | /* | 
|  | 4505 | * Everything below this pointer has been | 
|  | 4506 | * released.  Now let this top-of-subtree go. | 
|  | 4507 | * | 
|  | 4508 | * We want the freeing of this indirect block to be | 
|  | 4509 | * atomic in the journal with the updating of the | 
|  | 4510 | * bitmap block which owns it.  So make some room in | 
|  | 4511 | * the journal. | 
|  | 4512 | * | 
|  | 4513 | * We zero the parent pointer *after* freeing its | 
|  | 4514 | * pointee in the bitmaps, so if extend_transaction() | 
|  | 4515 | * for some reason fails to put the bitmap changes and | 
|  | 4516 | * the release into the same transaction, recovery | 
|  | 4517 | * will merely complain about releasing a free block, | 
|  | 4518 | * rather than leaking blocks. | 
|  | 4519 | */ | 
| Frank Mayhar | 0390131 | 2009-01-07 00:06:22 -0500 | [diff] [blame] | 4520 | if (ext4_handle_is_aborted(handle)) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4521 | return; | 
|  | 4522 | if (try_to_extend_transaction(handle, inode)) { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4523 | ext4_mark_inode_dirty(handle, inode); | 
| Jan Kara | 487caee | 2009-08-17 22:17:20 -0400 | [diff] [blame] | 4524 | ext4_truncate_restart_trans(handle, inode, | 
|  | 4525 | blocks_for_truncate(inode)); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4526 | } | 
|  | 4527 |  | 
| Theodore Ts'o | e636260 | 2009-11-23 07:17:05 -0500 | [diff] [blame] | 4528 | ext4_free_blocks(handle, inode, 0, nr, 1, | 
|  | 4529 | EXT4_FREE_BLOCKS_METADATA); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4530 |  | 
|  | 4531 | if (parent_bh) { | 
|  | 4532 | /* | 
|  | 4533 | * The block which we have just freed is | 
|  | 4534 | * pointed to by an indirect block: journal it | 
|  | 4535 | */ | 
|  | 4536 | BUFFER_TRACE(parent_bh, "get_write_access"); | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4537 | if (!ext4_journal_get_write_access(handle, | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4538 | parent_bh)){ | 
|  | 4539 | *p = 0; | 
|  | 4540 | BUFFER_TRACE(parent_bh, | 
| Frank Mayhar | 0390131 | 2009-01-07 00:06:22 -0500 | [diff] [blame] | 4541 | "call ext4_handle_dirty_metadata"); | 
|  | 4542 | ext4_handle_dirty_metadata(handle, | 
|  | 4543 | inode, | 
|  | 4544 | parent_bh); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4545 | } | 
|  | 4546 | } | 
|  | 4547 | } | 
|  | 4548 | } else { | 
|  | 4549 | /* We have reached the bottom of the tree. */ | 
|  | 4550 | BUFFER_TRACE(parent_bh, "free data blocks"); | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4551 | ext4_free_data(handle, inode, parent_bh, first, last); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4552 | } | 
|  | 4553 | } | 
|  | 4554 |  | 
| Duane Griffin | 91ef4ca | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 4555 | int ext4_can_truncate(struct inode *inode) | 
|  | 4556 | { | 
|  | 4557 | if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) | 
|  | 4558 | return 0; | 
|  | 4559 | if (S_ISREG(inode->i_mode)) | 
|  | 4560 | return 1; | 
|  | 4561 | if (S_ISDIR(inode->i_mode)) | 
|  | 4562 | return 1; | 
|  | 4563 | if (S_ISLNK(inode->i_mode)) | 
|  | 4564 | return !ext4_inode_is_fast_symlink(inode); | 
|  | 4565 | return 0; | 
|  | 4566 | } | 
|  | 4567 |  | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4568 | /* | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4569 | * ext4_truncate() | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4570 | * | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4571 | * We block out ext4_get_block() block instantiations across the entire | 
|  | 4572 | * transaction, and VFS/VM ensures that ext4_truncate() cannot run | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4573 | * simultaneously on behalf of the same inode. | 
|  | 4574 | * | 
|  | 4575 | * As we work through the truncate and commit bits of it to the journal there | 
|  | 4576 | * is one core guiding principle: the file's tree must always be consistent on | 
|  | 4577 | * disk.  We must be able to restart the truncate after a crash. | 
|  | 4578 | * | 
|  | 4579 | * The file's tree may be transiently inconsistent in memory (although it | 
|  | 4580 | * probably isn't), but whenever we close off and commit a journal transaction, | 
|  | 4581 | * the contents of (the filesystem + the journal) must be consistent and | 
|  | 4582 | * restartable.  It's pretty simple, really: bottom up, right to left (although | 
|  | 4583 | * left-to-right works OK too). | 
|  | 4584 | * | 
|  | 4585 | * Note that at recovery time, journal replay occurs *before* the restart of | 
|  | 4586 | * truncate against the orphan inode list. | 
|  | 4587 | * | 
|  | 4588 | * The committed inode has the new, desired i_size (which is the same as | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4589 | * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4590 | * that this inode's truncate did not complete and it will again call | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4591 | * ext4_truncate() to have another go.  So there will be instantiated blocks | 
|  | 4592 | * to the right of the truncation point in a crashed ext4 filesystem.  But | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4593 | * that's fine - as long as they are linked from the inode, the post-crash | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4594 | * ext4_truncate() run will find them and release them. | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4595 | */ | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4596 | void ext4_truncate(struct inode *inode) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4597 | { | 
|  | 4598 | handle_t *handle; | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4599 | struct ext4_inode_info *ei = EXT4_I(inode); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4600 | __le32 *i_data = ei->i_data; | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4601 | int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4602 | struct address_space *mapping = inode->i_mapping; | 
| Aneesh Kumar K.V | 725d26d | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 4603 | ext4_lblk_t offsets[4]; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4604 | Indirect chain[4]; | 
|  | 4605 | Indirect *partial; | 
|  | 4606 | __le32 nr = 0; | 
|  | 4607 | int n; | 
| Aneesh Kumar K.V | 725d26d | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 4608 | ext4_lblk_t last_block; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4609 | unsigned blocksize = inode->i_sb->s_blocksize; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4610 |  | 
| Duane Griffin | 91ef4ca | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 4611 | if (!ext4_can_truncate(inode)) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4612 | return; | 
|  | 4613 |  | 
| Jiaying Zhang | c8d46e4 | 2010-02-24 09:52:53 -0500 | [diff] [blame] | 4614 | EXT4_I(inode)->i_flags &= ~EXT4_EOFBLOCKS_FL; | 
|  | 4615 |  | 
| Theodore Ts'o | 5534fb5 | 2009-09-17 09:34:16 -0400 | [diff] [blame] | 4616 | if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) | 
| Theodore Ts'o | 19f5fb7 | 2010-01-24 14:34:07 -0500 | [diff] [blame] | 4617 | ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); | 
| Theodore Ts'o | 7d8f9f7 | 2009-02-24 08:21:14 -0500 | [diff] [blame] | 4618 |  | 
| Aneesh Kumar K.V | 1d03ec9 | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 4619 | if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { | 
| Jan Kara | cf108bc | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 4620 | ext4_ext_truncate(inode); | 
| Aneesh Kumar K.V | 1d03ec9 | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 4621 | return; | 
|  | 4622 | } | 
| Alex Tomas | a86c618 | 2006-10-11 01:21:03 -0700 | [diff] [blame] | 4623 |  | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4624 | handle = start_transaction(inode); | 
| Jan Kara | cf108bc | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 4625 | if (IS_ERR(handle)) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4626 | return;		/* AKPM: return what? */ | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4627 |  | 
|  | 4628 | last_block = (inode->i_size + blocksize-1) | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4629 | >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4630 |  | 
| Jan Kara | cf108bc | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 4631 | if (inode->i_size & (blocksize - 1)) | 
|  | 4632 | if (ext4_block_truncate_page(handle, mapping, inode->i_size)) | 
|  | 4633 | goto out_stop; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4634 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4635 | n = ext4_block_to_path(inode, last_block, offsets, NULL); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4636 | if (n == 0) | 
|  | 4637 | goto out_stop;	/* error */ | 
|  | 4638 |  | 
|  | 4639 | /* | 
|  | 4640 | * OK.  This truncate is going to happen.  We add the inode to the | 
|  | 4641 | * orphan list, so that if this truncate spans multiple transactions, | 
|  | 4642 | * and we crash, we will resume the truncate when the filesystem | 
|  | 4643 | * recovers.  It also marks the inode dirty, to catch the new size. | 
|  | 4644 | * | 
|  | 4645 | * Implication: the file must always be in a sane, consistent | 
|  | 4646 | * truncatable state while each transaction commits. | 
|  | 4647 | */ | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4648 | if (ext4_orphan_add(handle, inode)) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4649 | goto out_stop; | 
|  | 4650 |  | 
|  | 4651 | /* | 
| Mingming Cao | 632eaea | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 4652 | * From here we block out all ext4_get_block() callers who want to | 
|  | 4653 | * modify the block allocation tree. | 
|  | 4654 | */ | 
|  | 4655 | down_write(&ei->i_data_sem); | 
| Theodore Ts'o | b4df203 | 2008-08-13 21:44:34 -0400 | [diff] [blame] | 4656 |  | 
| Theodore Ts'o | c2ea3fd | 2008-10-10 09:40:52 -0400 | [diff] [blame] | 4657 | ext4_discard_preallocations(inode); | 
| Theodore Ts'o | b4df203 | 2008-08-13 21:44:34 -0400 | [diff] [blame] | 4658 |  | 
| Mingming Cao | 632eaea | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 4659 | /* | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4660 | * The orphan list entry will now protect us from any crash which | 
|  | 4661 | * occurs before the truncate completes, so it is now safe to propagate | 
|  | 4662 | * the new, shorter inode size (held for now in i_size) into the | 
|  | 4663 | * on-disk inode. We do this via i_disksize, which is the value which | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4664 | * ext4 *really* writes onto the disk inode. | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4665 | */ | 
|  | 4666 | ei->i_disksize = inode->i_size; | 
|  | 4667 |  | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4668 | if (n == 1) {		/* direct blocks */ | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4669 | ext4_free_data(handle, inode, NULL, i_data+offsets[0], | 
|  | 4670 | i_data + EXT4_NDIR_BLOCKS); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4671 | goto do_indirects; | 
|  | 4672 | } | 
|  | 4673 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4674 | partial = ext4_find_shared(inode, n, offsets, chain, &nr); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4675 | /* Kill the top of shared branch (not detached) */ | 
|  | 4676 | if (nr) { | 
|  | 4677 | if (partial == chain) { | 
|  | 4678 | /* Shared branch grows from the inode */ | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4679 | ext4_free_branches(handle, inode, NULL, | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4680 | &nr, &nr+1, (chain+n-1) - partial); | 
|  | 4681 | *partial->p = 0; | 
|  | 4682 | /* | 
|  | 4683 | * We mark the inode dirty prior to restart, | 
|  | 4684 | * and prior to stop.  No need for it here. | 
|  | 4685 | */ | 
|  | 4686 | } else { | 
|  | 4687 | /* Shared branch grows from an indirect block */ | 
|  | 4688 | BUFFER_TRACE(partial->bh, "get_write_access"); | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4689 | ext4_free_branches(handle, inode, partial->bh, | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4690 | partial->p, | 
|  | 4691 | partial->p+1, (chain+n-1) - partial); | 
|  | 4692 | } | 
|  | 4693 | } | 
|  | 4694 | /* Clear the ends of indirect blocks on the shared branch */ | 
|  | 4695 | while (partial > chain) { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4696 | ext4_free_branches(handle, inode, partial->bh, partial->p + 1, | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4697 | (__le32*)partial->bh->b_data+addr_per_block, | 
|  | 4698 | (chain+n-1) - partial); | 
|  | 4699 | BUFFER_TRACE(partial->bh, "call brelse"); | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 4700 | brelse(partial->bh); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4701 | partial--; | 
|  | 4702 | } | 
|  | 4703 | do_indirects: | 
|  | 4704 | /* Kill the remaining (whole) subtrees */ | 
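|  |  | /* | 
|  |  |  * The cases below fall through deliberately: the switch enters at the | 
|  |  |  * level containing the truncation point (offsets[0]) and then frees | 
|  |  |  * every whole indirect tree deeper than it (single, then double, then | 
|  |  |  * triple indirect), each of which lies entirely past the new i_size. | 
|  |  |  */ | 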
|  | 4705 | switch (offsets[0]) { | 
|  | 4706 | default: | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4707 | nr = i_data[EXT4_IND_BLOCK]; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4708 | if (nr) { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4709 | ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1); | 
|  | 4710 | i_data[EXT4_IND_BLOCK] = 0; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4711 | } | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4712 | case EXT4_IND_BLOCK: | 
|  | 4713 | nr = i_data[EXT4_DIND_BLOCK]; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4714 | if (nr) { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4715 | ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2); | 
|  | 4716 | i_data[EXT4_DIND_BLOCK] = 0; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4717 | } | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4718 | case EXT4_DIND_BLOCK: | 
|  | 4719 | nr = i_data[EXT4_TIND_BLOCK]; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4720 | if (nr) { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4721 | ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3); | 
|  | 4722 | i_data[EXT4_TIND_BLOCK] = 0; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4723 | } | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4724 | case EXT4_TIND_BLOCK: | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4725 | ; | 
|  | 4726 | } | 
|  | 4727 |  | 
| Aneesh Kumar K.V | 0e855ac | 2008-01-28 23:58:26 -0500 | [diff] [blame] | 4728 | up_write(&ei->i_data_sem); | 
| Kalpak Shah | ef7f383 | 2007-07-18 09:15:20 -0400 | [diff] [blame] | 4729 | inode->i_mtime = inode->i_ctime = ext4_current_time(inode); | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4730 | ext4_mark_inode_dirty(handle, inode); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4731 |  | 
|  | 4732 | /* | 
|  | 4733 | * In a multi-transaction truncate, we only make the final transaction | 
|  | 4734 | * synchronous | 
|  | 4735 | */ | 
|  | 4736 | if (IS_SYNC(inode)) | 
| Frank Mayhar | 0390131 | 2009-01-07 00:06:22 -0500 | [diff] [blame] | 4737 | ext4_handle_sync(handle); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4738 | out_stop: | 
|  | 4739 | /* | 
|  | 4740 | * If this was a simple ftruncate(), and the file will remain alive | 
|  | 4741 | * then we need to clear up the orphan record which we created above. | 
|  | 4742 | * However, if this was a real unlink then we were called by | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4743 | * ext4_delete_inode(), and we allow that function to clean up the | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4744 | * orphan info for us. | 
|  | 4745 | */ | 
|  | 4746 | if (inode->i_nlink) | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4747 | ext4_orphan_del(handle, inode); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4748 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4749 | ext4_journal_stop(handle); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4750 | } | 
|  | 4751 |  | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4752 | /* | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4753 | * ext4_get_inode_loc returns with an extra refcount against the inode's | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4754 | * underlying buffer_head on success. If 'in_mem' is true, we have all | 
|  | 4755 | * data in memory that is needed to recreate the on-disk version of this | 
|  | 4756 | * inode. | 
|  | 4757 | */ | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4758 | static int __ext4_get_inode_loc(struct inode *inode, | 
|  | 4759 | struct ext4_iloc *iloc, int in_mem) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4760 | { | 
| Theodore Ts'o | 240799c | 2008-10-09 23:53:47 -0400 | [diff] [blame] | 4761 | struct ext4_group_desc	*gdp; | 
|  | 4762 | struct buffer_head	*bh; | 
|  | 4763 | struct super_block	*sb = inode->i_sb; | 
|  | 4764 | ext4_fsblk_t		block; | 
|  | 4765 | int			inodes_per_block, inode_offset; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4766 |  | 
| Aneesh Kumar K.V | 3a06d77 | 2008-11-22 15:04:59 -0500 | [diff] [blame] | 4767 | iloc->bh = NULL; | 
| Theodore Ts'o | 240799c | 2008-10-09 23:53:47 -0400 | [diff] [blame] | 4768 | if (!ext4_valid_inum(sb, inode->i_ino)) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4769 | return -EIO; | 
|  | 4770 |  | 
| Theodore Ts'o | 240799c | 2008-10-09 23:53:47 -0400 | [diff] [blame] | 4771 | iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); | 
|  | 4772 | gdp = ext4_get_group_desc(sb, iloc->block_group, NULL); | 
|  | 4773 | if (!gdp) | 
|  | 4774 | return -EIO; | 
|  | 4775 |  | 
|  | 4776 | /* | 
|  | 4777 | * Figure out the offset within the block group inode table | 
|  | 4778 | */ | 
|  | 4779 | inodes_per_block = (EXT4_BLOCK_SIZE(sb) / EXT4_INODE_SIZE(sb)); | 
|  | 4780 | inode_offset = ((inode->i_ino - 1) % | 
|  | 4781 | EXT4_INODES_PER_GROUP(sb)); | 
|  | 4782 | block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); | 
|  | 4783 | iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); | 
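|  |  | /* | 
|  |  |  * For example, with 4K blocks and 256-byte inodes (16 inodes per block), | 
|  |  |  * an inode_offset of 17 maps to the second inode-table block of the group | 
|  |  |  * (table + 1) at byte offset 1 * 256 within that block. | 
|  |  |  */ | 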
|  | 4784 |  | 
|  | 4785 | bh = sb_getblk(sb, block); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4786 | if (!bh) { | 
| Eric Sandeen | 12062dd | 2010-02-15 14:19:27 -0500 | [diff] [blame] | 4787 | ext4_error(sb, "unable to read inode block - " | 
|  | 4788 | "inode=%lu, block=%llu", inode->i_ino, block); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4789 | return -EIO; | 
|  | 4790 | } | 
|  | 4791 | if (!buffer_uptodate(bh)) { | 
|  | 4792 | lock_buffer(bh); | 
| Hidehiro Kawai | 9c83a92 | 2008-07-26 16:39:26 -0400 | [diff] [blame] | 4793 |  | 
|  | 4794 | /* | 
|  | 4795 | * If the buffer has the write error flag, we have failed | 
|  | 4796 | * to write out another inode in the same block.  In this | 
|  | 4797 | * case, we don't have to read the block because we may | 
|  | 4798 | * read the old inode data successfully. | 
|  | 4799 | */ | 
|  | 4800 | if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) | 
|  | 4801 | set_buffer_uptodate(bh); | 
|  | 4802 |  | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4803 | if (buffer_uptodate(bh)) { | 
|  | 4804 | /* someone brought it uptodate while we waited */ | 
|  | 4805 | unlock_buffer(bh); | 
|  | 4806 | goto has_buffer; | 
|  | 4807 | } | 
|  | 4808 |  | 
|  | 4809 | /* | 
|  | 4810 | * If we have all information of the inode in memory and this | 
|  | 4811 | * is the only valid inode in the block, we need not read the | 
|  | 4812 | * block. | 
|  | 4813 | */ | 
|  | 4814 | if (in_mem) { | 
|  | 4815 | struct buffer_head *bitmap_bh; | 
| Theodore Ts'o | 240799c | 2008-10-09 23:53:47 -0400 | [diff] [blame] | 4816 | int i, start; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4817 |  | 
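|  |  | /* Round inode_offset down to the first inode that shares this | 
|  |  |  * inode-table block; inodes_per_block is a power of two, so the | 
|  |  |  * mask below is exact. */ | 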
| Theodore Ts'o | 240799c | 2008-10-09 23:53:47 -0400 | [diff] [blame] | 4818 | start = inode_offset & ~(inodes_per_block - 1); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4819 |  | 
|  | 4820 | /* Is the inode bitmap in cache? */ | 
| Theodore Ts'o | 240799c | 2008-10-09 23:53:47 -0400 | [diff] [blame] | 4821 | bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4822 | if (!bitmap_bh) | 
|  | 4823 | goto make_io; | 
|  | 4824 |  | 
|  | 4825 | /* | 
|  | 4826 | * If the inode bitmap isn't in cache then the | 
|  | 4827 | * optimisation may end up performing two reads instead | 
|  | 4828 | * of one, so skip it. | 
|  | 4829 | */ | 
|  | 4830 | if (!buffer_uptodate(bitmap_bh)) { | 
|  | 4831 | brelse(bitmap_bh); | 
|  | 4832 | goto make_io; | 
|  | 4833 | } | 
| Theodore Ts'o | 240799c | 2008-10-09 23:53:47 -0400 | [diff] [blame] | 4834 | for (i = start; i < start + inodes_per_block; i++) { | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4835 | if (i == inode_offset) | 
|  | 4836 | continue; | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4837 | if (ext4_test_bit(i, bitmap_bh->b_data)) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4838 | break; | 
|  | 4839 | } | 
|  | 4840 | brelse(bitmap_bh); | 
| Theodore Ts'o | 240799c | 2008-10-09 23:53:47 -0400 | [diff] [blame] | 4841 | if (i == start + inodes_per_block) { | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4842 | /* all other inodes are free, so skip I/O */ | 
|  | 4843 | memset(bh->b_data, 0, bh->b_size); | 
|  | 4844 | set_buffer_uptodate(bh); | 
|  | 4845 | unlock_buffer(bh); | 
|  | 4846 | goto has_buffer; | 
|  | 4847 | } | 
|  | 4848 | } | 
|  | 4849 |  | 
|  | 4850 | make_io: | 
|  | 4851 | /* | 
| Theodore Ts'o | 240799c | 2008-10-09 23:53:47 -0400 | [diff] [blame] | 4852 | * If we need to do any I/O, try to pre-readahead extra | 
|  | 4853 | * blocks from the inode table. | 
|  | 4854 | */ | 
|  | 4855 | if (EXT4_SB(sb)->s_inode_readahead_blks) { | 
|  | 4856 | ext4_fsblk_t b, end, table; | 
|  | 4857 | unsigned num; | 
|  | 4858 |  | 
|  | 4859 | table = ext4_inode_table(sb, gdp); | 
| Theodore Ts'o | b713a5e | 2009-03-31 09:11:14 -0400 | [diff] [blame] | 4860 | /* s_inode_readahead_blks is always a power of 2 */ | 
| Theodore Ts'o | 240799c | 2008-10-09 23:53:47 -0400 | [diff] [blame] | 4861 | b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1); | 
|  | 4862 | if (table > b) | 
|  | 4863 | b = table; | 
|  | 4864 | end = b + EXT4_SB(sb)->s_inode_readahead_blks; | 
|  | 4865 | num = EXT4_INODES_PER_GROUP(sb); | 
|  | 4866 | if (EXT4_HAS_RO_COMPAT_FEATURE(sb, | 
|  | 4867 | EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) | 
| Aneesh Kumar K.V | 560671a | 2009-01-05 22:20:24 -0500 | [diff] [blame] | 4868 | num -= ext4_itable_unused_count(sb, gdp); | 
| Theodore Ts'o | 240799c | 2008-10-09 23:53:47 -0400 | [diff] [blame] | 4869 | table += num / inodes_per_block; | 
|  | 4870 | if (end > table) | 
|  | 4871 | end = table; | 
|  | 4872 | while (b <= end) | 
|  | 4873 | sb_breadahead(sb, b++); | 
|  | 4874 | } | 
|  | 4875 |  | 
|  | 4876 | /* | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4877 | * There are other valid inodes in the buffer, this inode | 
|  | 4878 | * has in-inode xattrs, or we don't have this inode in memory. | 
|  | 4879 | * Read the block from disk. | 
|  | 4880 | */ | 
|  | 4881 | get_bh(bh); | 
|  | 4882 | bh->b_end_io = end_buffer_read_sync; | 
|  | 4883 | submit_bh(READ_META, bh); | 
|  | 4884 | wait_on_buffer(bh); | 
|  | 4885 | if (!buffer_uptodate(bh)) { | 
| Eric Sandeen | 12062dd | 2010-02-15 14:19:27 -0500 | [diff] [blame] | 4886 | ext4_error(sb, "unable to read inode block - inode=%lu," | 
|  | 4887 | " block=%llu", inode->i_ino, block); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4888 | brelse(bh); | 
|  | 4889 | return -EIO; | 
|  | 4890 | } | 
|  | 4891 | } | 
|  | 4892 | has_buffer: | 
|  | 4893 | iloc->bh = bh; | 
|  | 4894 | return 0; | 
|  | 4895 | } | 
|  | 4896 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4897 | int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4898 | { | 
|  | 4899 | /* We have all inode data except xattrs in memory here. */ | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4900 | return __ext4_get_inode_loc(inode, iloc, | 
| Theodore Ts'o | 19f5fb7 | 2010-01-24 14:34:07 -0500 | [diff] [blame] | 4901 | !ext4_test_inode_state(inode, EXT4_STATE_XATTR)); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4902 | } | 
|  | 4903 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4904 | void ext4_set_inode_flags(struct inode *inode) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4905 | { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4906 | unsigned int flags = EXT4_I(inode)->i_flags; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4907 |  | 
|  | 4908 | inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4909 | if (flags & EXT4_SYNC_FL) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4910 | inode->i_flags |= S_SYNC; | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4911 | if (flags & EXT4_APPEND_FL) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4912 | inode->i_flags |= S_APPEND; | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4913 | if (flags & EXT4_IMMUTABLE_FL) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4914 | inode->i_flags |= S_IMMUTABLE; | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4915 | if (flags & EXT4_NOATIME_FL) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4916 | inode->i_flags |= S_NOATIME; | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4917 | if (flags & EXT4_DIRSYNC_FL) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4918 | inode->i_flags |= S_DIRSYNC; | 
|  | 4919 | } | 
|  | 4920 |  | 
| Jan Kara | ff9ddf7 | 2007-07-18 09:24:20 -0400 | [diff] [blame] | 4921 | /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ | 
|  | 4922 | void ext4_get_inode_flags(struct ext4_inode_info *ei) | 
|  | 4923 | { | 
|  | 4924 | unsigned int flags = ei->vfs_inode.i_flags; | 
|  | 4925 |  | 
|  | 4926 | ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL| | 
|  | 4927 | EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL); | 
|  | 4928 | if (flags & S_SYNC) | 
|  | 4929 | ei->i_flags |= EXT4_SYNC_FL; | 
|  | 4930 | if (flags & S_APPEND) | 
|  | 4931 | ei->i_flags |= EXT4_APPEND_FL; | 
|  | 4932 | if (flags & S_IMMUTABLE) | 
|  | 4933 | ei->i_flags |= EXT4_IMMUTABLE_FL; | 
|  | 4934 | if (flags & S_NOATIME) | 
|  | 4935 | ei->i_flags |= EXT4_NOATIME_FL; | 
|  | 4936 | if (flags & S_DIRSYNC) | 
|  | 4937 | ei->i_flags |= EXT4_DIRSYNC_FL; | 
|  | 4938 | } | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 4939 |  | 
| Aneesh Kumar K.V | 0fc1b45 | 2008-01-28 23:58:26 -0500 | [diff] [blame] | 4940 | static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 4941 | struct ext4_inode_info *ei) | 
| Aneesh Kumar K.V | 0fc1b45 | 2008-01-28 23:58:26 -0500 | [diff] [blame] | 4942 | { | 
|  | 4943 | blkcnt_t i_blocks; | 
| Aneesh Kumar K.V | 8180a56 | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 4944 | struct inode *inode = &(ei->vfs_inode); | 
|  | 4945 | struct super_block *sb = inode->i_sb; | 
| Aneesh Kumar K.V | 0fc1b45 | 2008-01-28 23:58:26 -0500 | [diff] [blame] | 4946 |  | 
|  | 4947 | if (EXT4_HAS_RO_COMPAT_FEATURE(sb, | 
|  | 4948 | EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) { | 
|  | 4949 | /* we are using combined 48 bit field */ | 
|  | 4950 | i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 | | 
|  | 4951 | le32_to_cpu(raw_inode->i_blocks_lo); | 
| Aneesh Kumar K.V | 8180a56 | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 4952 | if (ei->i_flags & EXT4_HUGE_FILE_FL) { | 
|  | 4953 | /* i_blocks represent file system block size */ | 
|  | 4954 | return i_blocks  << (inode->i_blkbits - 9); | 
|  | 4955 | } else { | 
|  | 4956 | return i_blocks; | 
|  | 4957 | } | 
| Aneesh Kumar K.V | 0fc1b45 | 2008-01-28 23:58:26 -0500 | [diff] [blame] | 4958 | } else { | 
|  | 4959 | return le32_to_cpu(raw_inode->i_blocks_lo); | 
|  | 4960 | } | 
|  | 4961 | } | 
| Jan Kara | ff9ddf7 | 2007-07-18 09:24:20 -0400 | [diff] [blame] | 4962 |  | 
| David Howells | 1d1fe1e | 2008-02-07 00:15:37 -0800 | [diff] [blame] | 4963 | struct inode *ext4_iget(struct super_block *sb, unsigned long ino) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4964 | { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4965 | struct ext4_iloc iloc; | 
|  | 4966 | struct ext4_inode *raw_inode; | 
| David Howells | 1d1fe1e | 2008-02-07 00:15:37 -0800 | [diff] [blame] | 4967 | struct ext4_inode_info *ei; | 
| David Howells | 1d1fe1e | 2008-02-07 00:15:37 -0800 | [diff] [blame] | 4968 | struct inode *inode; | 
| Jan Kara | b436b9b | 2009-12-08 23:51:10 -0500 | [diff] [blame] | 4969 | journal_t *journal = EXT4_SB(sb)->s_journal; | 
| David Howells | 1d1fe1e | 2008-02-07 00:15:37 -0800 | [diff] [blame] | 4970 | long ret; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4971 | int block; | 
|  | 4972 |  | 
| David Howells | 1d1fe1e | 2008-02-07 00:15:37 -0800 | [diff] [blame] | 4973 | inode = iget_locked(sb, ino); | 
|  | 4974 | if (!inode) | 
|  | 4975 | return ERR_PTR(-ENOMEM); | 
|  | 4976 | if (!(inode->i_state & I_NEW)) | 
|  | 4977 | return inode; | 
|  | 4978 |  | 
|  | 4979 | ei = EXT4_I(inode); | 
| Theodore Ts'o | 567f3e9 | 2009-11-14 08:19:05 -0500 | [diff] [blame] | 4980 | iloc.bh = NULL; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4981 |  | 
| David Howells | 1d1fe1e | 2008-02-07 00:15:37 -0800 | [diff] [blame] | 4982 | ret = __ext4_get_inode_loc(inode, &iloc, 0); | 
|  | 4983 | if (ret < 0) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4984 | goto bad_inode; | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 4985 | raw_inode = ext4_raw_inode(&iloc); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4986 | inode->i_mode = le16_to_cpu(raw_inode->i_mode); | 
|  | 4987 | inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); | 
|  | 4988 | inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); | 
| Theodore Ts'o | af5bc92 | 2008-09-08 22:25:24 -0400 | [diff] [blame] | 4989 | if (!(test_opt(inode->i_sb, NO_UID32))) { | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4990 | inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; | 
|  | 4991 | inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; | 
|  | 4992 | } | 
|  | 4993 | inode->i_nlink = le16_to_cpu(raw_inode->i_links_count); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4994 |  | 
| Theodore Ts'o | 19f5fb7 | 2010-01-24 14:34:07 -0500 | [diff] [blame] | 4995 | ei->i_state_flags = 0; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 4996 | ei->i_dir_start_lookup = 0; | 
|  | 4997 | ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); | 
|  | 4998 | /* We now have enough fields to check if the inode was active or not. | 
|  | 4999 | * This is needed because nfsd might try to access dead inodes; | 
|  | 5000 | * the test is the same one that e2fsck uses. | 
|  | 5001 | * NeilBrown 1999oct15 | 
|  | 5002 | */ | 
|  | 5003 | if (inode->i_nlink == 0) { | 
|  | 5004 | if (inode->i_mode == 0 || | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5005 | !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) { | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5006 | /* this inode is deleted */ | 
| David Howells | 1d1fe1e | 2008-02-07 00:15:37 -0800 | [diff] [blame] | 5007 | ret = -ESTALE; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5008 | goto bad_inode; | 
|  | 5009 | } | 
|  | 5010 | /* The only unlinked inodes we let through here have | 
|  | 5011 | * valid i_mode and are being read by the orphan | 
|  | 5012 | * recovery code: that's fine, we're about to complete | 
|  | 5013 | * the process of deleting those. */ | 
|  | 5014 | } | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5015 | ei->i_flags = le32_to_cpu(raw_inode->i_flags); | 
| Aneesh Kumar K.V | 0fc1b45 | 2008-01-28 23:58:26 -0500 | [diff] [blame] | 5016 | inode->i_blocks = ext4_inode_blocks(raw_inode, ei); | 
| Aneesh Kumar K.V | 7973c0c | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 5017 | ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); | 
| Theodore Ts'o | a9e8174 | 2009-04-24 16:11:18 -0400 | [diff] [blame] | 5018 | if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT)) | 
| Badari Pulavarty | a1ddeb7 | 2006-10-11 01:21:09 -0700 | [diff] [blame] | 5019 | ei->i_file_acl |= | 
|  | 5020 | ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; | 
| Aneesh Kumar K.V | a48380f | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 5021 | inode->i_size = ext4_isize(raw_inode); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5022 | ei->i_disksize = inode->i_size; | 
| Dmitry Monakhov | a9e7f44 | 2009-12-14 15:21:14 +0300 | [diff] [blame] | 5023 | #ifdef CONFIG_QUOTA | 
|  | 5024 | ei->i_reserved_quota = 0; | 
|  | 5025 | #endif | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5026 | inode->i_generation = le32_to_cpu(raw_inode->i_generation); | 
|  | 5027 | ei->i_block_group = iloc.block_group; | 
| Theodore Ts'o | a491212 | 2009-03-12 12:18:34 -0400 | [diff] [blame] | 5028 | ei->i_last_alloc_group = ~0; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5029 | /* | 
|  | 5030 | * NOTE! The in-memory inode i_data array is in little-endian order | 
|  | 5031 | * even on big-endian machines: we do NOT byteswap the block numbers! | 
|  | 5032 | */ | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5033 | for (block = 0; block < EXT4_N_BLOCKS; block++) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5034 | ei->i_data[block] = raw_inode->i_block[block]; | 
|  | 5035 | INIT_LIST_HEAD(&ei->i_orphan); | 
|  | 5036 |  | 
| Jan Kara | b436b9b | 2009-12-08 23:51:10 -0500 | [diff] [blame] | 5037 | /* | 
|  | 5038 | * Set transaction id's of transactions that have to be committed | 
|  | 5039 | * to finish f[data]sync. We set them to currently running transaction | 
|  | 5040 | * as we cannot be sure that the inode or some of its metadata isn't | 
|  | 5041 | * part of the transaction - the inode could have been reclaimed and | 
|  | 5042 | * now it is reread from disk. | 
|  | 5043 | */ | 
|  | 5044 | if (journal) { | 
|  | 5045 | transaction_t *transaction; | 
|  | 5046 | tid_t tid; | 
|  | 5047 |  | 
|  | 5048 | spin_lock(&journal->j_state_lock); | 
|  | 5049 | if (journal->j_running_transaction) | 
|  | 5050 | transaction = journal->j_running_transaction; | 
|  | 5051 | else | 
|  | 5052 | transaction = journal->j_committing_transaction; | 
|  | 5053 | if (transaction) | 
|  | 5054 | tid = transaction->t_tid; | 
|  | 5055 | else | 
|  | 5056 | tid = journal->j_commit_sequence; | 
|  | 5057 | spin_unlock(&journal->j_state_lock); | 
|  | 5058 | ei->i_sync_tid = tid; | 
|  | 5059 | ei->i_datasync_tid = tid; | 
|  | 5060 | } | 
|  | 5061 |  | 
| Eric Sandeen | 0040d98 | 2008-02-05 22:36:43 -0500 | [diff] [blame] | 5062 | if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5063 | ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5064 | if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > | 
| Kirill Korotaev | e5d2861 | 2007-06-23 17:16:51 -0700 | [diff] [blame] | 5065 | EXT4_INODE_SIZE(inode->i_sb)) { | 
| David Howells | 1d1fe1e | 2008-02-07 00:15:37 -0800 | [diff] [blame] | 5066 | ret = -EIO; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5067 | goto bad_inode; | 
| Kirill Korotaev | e5d2861 | 2007-06-23 17:16:51 -0700 | [diff] [blame] | 5068 | } | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5069 | if (ei->i_extra_isize == 0) { | 
|  | 5070 | /* The extra space is currently unused. Use it. */ | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5071 | ei->i_extra_isize = sizeof(struct ext4_inode) - | 
|  | 5072 | EXT4_GOOD_OLD_INODE_SIZE; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5073 | } else { | 
|  | 5074 | __le32 *magic = (void *)raw_inode + | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5075 | EXT4_GOOD_OLD_INODE_SIZE + | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5076 | ei->i_extra_isize; | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5077 | if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) | 
| Theodore Ts'o | 19f5fb7 | 2010-01-24 14:34:07 -0500 | [diff] [blame] | 5078 | ext4_set_inode_state(inode, EXT4_STATE_XATTR); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5079 | } | 
|  | 5080 | } else | 
|  | 5081 | ei->i_extra_isize = 0; | 
|  | 5082 |  | 
| Kalpak Shah | ef7f383 | 2007-07-18 09:15:20 -0400 | [diff] [blame] | 5083 | EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); | 
|  | 5084 | EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); | 
|  | 5085 | EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); | 
|  | 5086 | EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); | 
|  | 5087 |  | 
| Jean Noel Cordenner | 25ec56b | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 5088 | inode->i_version = le32_to_cpu(raw_inode->i_disk_version); | 
|  | 5089 | if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { | 
|  | 5090 | if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) | 
|  | 5091 | inode->i_version |= | 
|  | 5092 | (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; | 
|  | 5093 | } | 
|  | 5094 |  | 
| Theodore Ts'o | c4b5a61 | 2009-04-24 18:45:35 -0400 | [diff] [blame] | 5095 | ret = 0; | 
| Theodore Ts'o | 485c26e | 2009-04-24 13:43:20 -0400 | [diff] [blame] | 5096 | if (ei->i_file_acl && | 
| Theodore Ts'o | 1032988 | 2009-11-15 15:29:56 -0500 | [diff] [blame] | 5097 | !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { | 
| Eric Sandeen | 12062dd | 2010-02-15 14:19:27 -0500 | [diff] [blame] | 5098 | ext4_error(sb, "bad extended attribute block %llu inode #%lu", | 
| Theodore Ts'o | 485c26e | 2009-04-24 13:43:20 -0400 | [diff] [blame] | 5099 | ei->i_file_acl, inode->i_ino); | 
|  | 5100 | ret = -EIO; | 
|  | 5101 | goto bad_inode; | 
|  | 5102 | } else if (ei->i_flags & EXT4_EXTENTS_FL) { | 
| Theodore Ts'o | c4b5a61 | 2009-04-24 18:45:35 -0400 | [diff] [blame] | 5103 | if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || | 
|  | 5104 | (S_ISLNK(inode->i_mode) && | 
|  | 5105 | !ext4_inode_is_fast_symlink(inode))) | 
|  | 5106 | /* Validate extent which is part of inode */ | 
|  | 5107 | ret = ext4_ext_check_inode(inode); | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 5108 | } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || | 
| Thiemo Nagel | fe2c819 | 2009-03-31 08:36:10 -0400 | [diff] [blame] | 5109 | (S_ISLNK(inode->i_mode) && | 
|  | 5110 | !ext4_inode_is_fast_symlink(inode))) { | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 5111 | /* Validate block references which are part of inode */ | 
| Thiemo Nagel | fe2c819 | 2009-03-31 08:36:10 -0400 | [diff] [blame] | 5112 | ret = ext4_check_inode_blockref(inode); | 
|  | 5113 | } | 
| Theodore Ts'o | 567f3e9 | 2009-11-14 08:19:05 -0500 | [diff] [blame] | 5114 | if (ret) | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 5115 | goto bad_inode; | 
| Aneesh Kumar K.V | 7a262f7 | 2009-03-27 16:39:58 -0400 | [diff] [blame] | 5116 |  | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5117 | if (S_ISREG(inode->i_mode)) { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5118 | inode->i_op = &ext4_file_inode_operations; | 
|  | 5119 | inode->i_fop = &ext4_file_operations; | 
|  | 5120 | ext4_set_aops(inode); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5121 | } else if (S_ISDIR(inode->i_mode)) { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5122 | inode->i_op = &ext4_dir_inode_operations; | 
|  | 5123 | inode->i_fop = &ext4_dir_operations; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5124 | } else if (S_ISLNK(inode->i_mode)) { | 
| Duane Griffin | e83c139 | 2008-12-19 20:47:15 +0000 | [diff] [blame] | 5125 | if (ext4_inode_is_fast_symlink(inode)) { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5126 | inode->i_op = &ext4_fast_symlink_inode_operations; | 
| Duane Griffin | e83c139 | 2008-12-19 20:47:15 +0000 | [diff] [blame] | 5127 | nd_terminate_link(ei->i_data, inode->i_size, | 
|  | 5128 | sizeof(ei->i_data) - 1); | 
|  | 5129 | } else { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5130 | inode->i_op = &ext4_symlink_inode_operations; | 
|  | 5131 | ext4_set_aops(inode); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5132 | } | 
| Theodore Ts'o | 563bdd6 | 2009-03-26 00:06:19 -0400 | [diff] [blame] | 5133 | } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || | 
|  | 5134 | S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5135 | inode->i_op = &ext4_special_inode_operations; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5136 | if (raw_inode->i_block[0]) | 
|  | 5137 | init_special_inode(inode, inode->i_mode, | 
|  | 5138 | old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); | 
|  | 5139 | else | 
|  | 5140 | init_special_inode(inode, inode->i_mode, | 
|  | 5141 | new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); | 
| Theodore Ts'o | 563bdd6 | 2009-03-26 00:06:19 -0400 | [diff] [blame] | 5142 | } else { | 
| Theodore Ts'o | 563bdd6 | 2009-03-26 00:06:19 -0400 | [diff] [blame] | 5143 | ret = -EIO; | 
| Eric Sandeen | 12062dd | 2010-02-15 14:19:27 -0500 | [diff] [blame] | 5144 | ext4_error(inode->i_sb, "bogus i_mode (%o) for inode=%lu", | 
| Theodore Ts'o | 563bdd6 | 2009-03-26 00:06:19 -0400 | [diff] [blame] | 5145 | inode->i_mode, inode->i_ino); | 
|  | 5146 | goto bad_inode; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5147 | } | 
| Theodore Ts'o | af5bc92 | 2008-09-08 22:25:24 -0400 | [diff] [blame] | 5148 | brelse(iloc.bh); | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5149 | ext4_set_inode_flags(inode); | 
| David Howells | 1d1fe1e | 2008-02-07 00:15:37 -0800 | [diff] [blame] | 5150 | unlock_new_inode(inode); | 
|  | 5151 | return inode; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5152 |  | 
|  | 5153 | bad_inode: | 
| Theodore Ts'o | 567f3e9 | 2009-11-14 08:19:05 -0500 | [diff] [blame] | 5154 | brelse(iloc.bh); | 
| David Howells | 1d1fe1e | 2008-02-07 00:15:37 -0800 | [diff] [blame] | 5155 | iget_failed(inode); | 
|  | 5156 | return ERR_PTR(ret); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5157 | } | 
|  | 5158 |  | 
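|  |  | /* | 
|  |  |  * Store inode->i_blocks into the raw on-disk inode, using one of three | 
|  |  |  * encodings: a 32-bit count of 512-byte units, a 48-bit count of 512-byte | 
|  |  |  * units (requires the huge_file feature), or a 48-bit count of filesystem | 
|  |  |  * blocks with EXT4_HUGE_FILE_FL set on the inode. | 
|  |  |  */ | 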
| Aneesh Kumar K.V | 0fc1b45 | 2008-01-28 23:58:26 -0500 | [diff] [blame] | 5159 | static int ext4_inode_blocks_set(handle_t *handle, | 
|  | 5160 | struct ext4_inode *raw_inode, | 
|  | 5161 | struct ext4_inode_info *ei) | 
|  | 5162 | { | 
|  | 5163 | struct inode *inode = &(ei->vfs_inode); | 
|  | 5164 | u64 i_blocks = inode->i_blocks; | 
|  | 5165 | struct super_block *sb = inode->i_sb; | 
| Aneesh Kumar K.V | 0fc1b45 | 2008-01-28 23:58:26 -0500 | [diff] [blame] | 5166 |  | 
|  | 5167 | if (i_blocks <= ~0U) { | 
|  | 5168 | /* | 
|  | 5169 | * i_blocks can be represented in a 32 bit variable | 
|  | 5170 | * as multiple of 512 bytes | 
|  | 5171 | */ | 
| Aneesh Kumar K.V | 8180a56 | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 5172 | raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks); | 
| Aneesh Kumar K.V | 0fc1b45 | 2008-01-28 23:58:26 -0500 | [diff] [blame] | 5173 | raw_inode->i_blocks_high = 0; | 
| Aneesh Kumar K.V | 8180a56 | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 5174 | ei->i_flags &= ~EXT4_HUGE_FILE_FL; | 
| Theodore Ts'o | f287a1a | 2008-10-16 22:50:48 -0400 | [diff] [blame] | 5175 | return 0; | 
|  | 5176 | } | 
|  | 5177 | if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) | 
|  | 5178 | return -EFBIG; | 
|  | 5179 |  | 
|  | 5180 | if (i_blocks <= 0xffffffffffffULL) { | 
| Aneesh Kumar K.V | 0fc1b45 | 2008-01-28 23:58:26 -0500 | [diff] [blame] | 5181 | /* | 
|  | 5182 | * i_blocks can be represented in a 48 bit variable | 
|  | 5183 | * as multiple of 512 bytes | 
|  | 5184 | */ | 
| Aneesh Kumar K.V | 8180a56 | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 5185 | raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks); | 
| Aneesh Kumar K.V | 0fc1b45 | 2008-01-28 23:58:26 -0500 | [diff] [blame] | 5186 | raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); | 
| Aneesh Kumar K.V | 8180a56 | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 5187 | ei->i_flags &= ~EXT4_HUGE_FILE_FL; | 
| Aneesh Kumar K.V | 0fc1b45 | 2008-01-28 23:58:26 -0500 | [diff] [blame] | 5188 | } else { | 
| Aneesh Kumar K.V | 8180a56 | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 5189 | ei->i_flags |= EXT4_HUGE_FILE_FL; | 
|  | 5190 | /* i_blocks is stored in units of the file system block size */ | 
|  | 5191 | i_blocks = i_blocks >> (inode->i_blkbits - 9); | 
|  | 5192 | raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks); | 
|  | 5193 | raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); | 
| Aneesh Kumar K.V | 0fc1b45 | 2008-01-28 23:58:26 -0500 | [diff] [blame] | 5194 | } | 
| Theodore Ts'o | f287a1a | 2008-10-16 22:50:48 -0400 | [diff] [blame] | 5195 | return 0; | 
| Aneesh Kumar K.V | 0fc1b45 | 2008-01-28 23:58:26 -0500 | [diff] [blame] | 5196 | } | 
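|  |  | /* | 
|  |  |  * Worked example (illustrative note, not part of the original source; | 
|  |  |  * assumes a 4 KiB block size): a file occupying 3 TiB has i_blocks = | 
|  |  |  * 3 TiB / 512 = 0x180000000 sectors, which overflows 32 bits but fits | 
|  |  |  * in 48 bits, so i_blocks_lo = 0x80000000, i_blocks_high = 0x1 and | 
|  |  |  * EXT4_HUGE_FILE_FL stays clear.  Only once i_blocks exceeds 2^48 - 1 | 
|  |  |  * sectors is the count stored in filesystem-block units | 
|  |  |  * (i_blocks >> (i_blkbits - 9)) with EXT4_HUGE_FILE_FL set. | 
|  |  |  */ | 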
|  | 5197 |  | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5198 | /* | 
|  | 5199 | * Post the struct inode info into an on-disk inode location in the | 
|  | 5200 | * buffer-cache.  This gobbles the caller's reference to the | 
|  | 5201 | * buffer_head in the inode location struct. | 
|  | 5202 | * | 
|  | 5203 | * The caller must have write access to iloc->bh. | 
|  | 5204 | */ | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5205 | static int ext4_do_update_inode(handle_t *handle, | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5206 | struct inode *inode, | 
| Frank Mayhar | 830156c | 2009-09-29 10:07:47 -0400 | [diff] [blame] | 5207 | struct ext4_iloc *iloc) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5208 | { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5209 | struct ext4_inode *raw_inode = ext4_raw_inode(iloc); | 
|  | 5210 | struct ext4_inode_info *ei = EXT4_I(inode); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5211 | struct buffer_head *bh = iloc->bh; | 
|  | 5212 | int err = 0, rc, block; | 
|  | 5213 |  | 
|  | 5214 | /* For fields not tracked in the in-memory inode, | 
|  | 5215 | * initialise them to zero for new inodes. */ | 
| Theodore Ts'o | 19f5fb7 | 2010-01-24 14:34:07 -0500 | [diff] [blame] | 5216 | if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5217 | memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5218 |  | 
| Jan Kara | ff9ddf7 | 2007-07-18 09:24:20 -0400 | [diff] [blame] | 5219 | ext4_get_inode_flags(ei); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5220 | raw_inode->i_mode = cpu_to_le16(inode->i_mode); | 
| Theodore Ts'o | af5bc92 | 2008-09-08 22:25:24 -0400 | [diff] [blame] | 5221 | if (!(test_opt(inode->i_sb, NO_UID32))) { | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5222 | raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid)); | 
|  | 5223 | raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid)); | 
|  | 5224 | /* | 
|  | 5225 | * Fix up interoperability with old kernels. Otherwise, old inodes get | 
|  | 5226 | * re-used with the upper 16 bits of the uid/gid intact | 
|  | 5227 | */ | 
| Theodore Ts'o | af5bc92 | 2008-09-08 22:25:24 -0400 | [diff] [blame] | 5228 | if (!ei->i_dtime) { | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5229 | raw_inode->i_uid_high = | 
|  | 5230 | cpu_to_le16(high_16_bits(inode->i_uid)); | 
|  | 5231 | raw_inode->i_gid_high = | 
|  | 5232 | cpu_to_le16(high_16_bits(inode->i_gid)); | 
|  | 5233 | } else { | 
|  | 5234 | raw_inode->i_uid_high = 0; | 
|  | 5235 | raw_inode->i_gid_high = 0; | 
|  | 5236 | } | 
|  | 5237 | } else { | 
|  | 5238 | raw_inode->i_uid_low = | 
|  | 5239 | cpu_to_le16(fs_high2lowuid(inode->i_uid)); | 
|  | 5240 | raw_inode->i_gid_low = | 
|  | 5241 | cpu_to_le16(fs_high2lowgid(inode->i_gid)); | 
|  | 5242 | raw_inode->i_uid_high = 0; | 
|  | 5243 | raw_inode->i_gid_high = 0; | 
|  | 5244 | } | 
|  | 5245 | raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); | 
| Kalpak Shah | ef7f383 | 2007-07-18 09:15:20 -0400 | [diff] [blame] | 5246 |  | 
|  | 5247 | EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode); | 
|  | 5248 | EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode); | 
|  | 5249 | EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); | 
|  | 5250 | EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); | 
|  | 5251 |  | 
| Aneesh Kumar K.V | 0fc1b45 | 2008-01-28 23:58:26 -0500 | [diff] [blame] | 5252 | if (ext4_inode_blocks_set(handle, raw_inode, ei)) | 
|  | 5253 | goto out_brelse; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5254 | raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); | 
| Theodore Ts'o | 1b9c12f | 2009-09-17 08:32:22 -0400 | [diff] [blame] | 5255 | raw_inode->i_flags = cpu_to_le32(ei->i_flags); | 
| Mingming Cao | 9b8f1f0 | 2006-10-11 01:21:13 -0700 | [diff] [blame] | 5256 | if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != | 
|  | 5257 | cpu_to_le32(EXT4_OS_HURD)) | 
| Badari Pulavarty | a1ddeb7 | 2006-10-11 01:21:09 -0700 | [diff] [blame] | 5258 | raw_inode->i_file_acl_high = | 
|  | 5259 | cpu_to_le16(ei->i_file_acl >> 32); | 
| Aneesh Kumar K.V | 7973c0c | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 5260 | raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); | 
| Aneesh Kumar K.V | a48380f | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 5261 | ext4_isize_set(raw_inode, ei->i_disksize); | 
|  | 5262 | if (ei->i_disksize > 0x7fffffffULL) { | 
|  | 5263 | struct super_block *sb = inode->i_sb; | 
|  | 5264 | if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, | 
|  | 5265 | EXT4_FEATURE_RO_COMPAT_LARGE_FILE) || | 
|  | 5266 | EXT4_SB(sb)->s_es->s_rev_level == | 
|  | 5267 | cpu_to_le32(EXT4_GOOD_OLD_REV)) { | 
|  | 5268 | /* If this is the first large file | 
|  | 5269 | * created, add a flag to the superblock. | 
|  | 5270 | */ | 
|  | 5271 | err = ext4_journal_get_write_access(handle, | 
|  | 5272 | EXT4_SB(sb)->s_sbh); | 
|  | 5273 | if (err) | 
|  | 5274 | goto out_brelse; | 
|  | 5275 | ext4_update_dynamic_rev(sb); | 
|  | 5276 | EXT4_SET_RO_COMPAT_FEATURE(sb, | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5277 | EXT4_FEATURE_RO_COMPAT_LARGE_FILE); | 
| Aneesh Kumar K.V | a48380f | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 5278 | sb->s_dirt = 1; | 
| Frank Mayhar | 0390131 | 2009-01-07 00:06:22 -0500 | [diff] [blame] | 5279 | ext4_handle_sync(handle); | 
| Curt Wohlgemuth | 73b50c1 | 2010-02-16 15:06:29 -0500 | [diff] [blame] | 5280 | err = ext4_handle_dirty_metadata(handle, NULL, | 
| Aneesh Kumar K.V | a48380f | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 5281 | EXT4_SB(sb)->s_sbh); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5282 | } | 
|  | 5283 | } | 
|  | 5284 | raw_inode->i_generation = cpu_to_le32(inode->i_generation); | 
|  | 5285 | if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { | 
|  | 5286 | if (old_valid_dev(inode->i_rdev)) { | 
|  | 5287 | raw_inode->i_block[0] = | 
|  | 5288 | cpu_to_le32(old_encode_dev(inode->i_rdev)); | 
|  | 5289 | raw_inode->i_block[1] = 0; | 
|  | 5290 | } else { | 
|  | 5291 | raw_inode->i_block[0] = 0; | 
|  | 5292 | raw_inode->i_block[1] = | 
|  | 5293 | cpu_to_le32(new_encode_dev(inode->i_rdev)); | 
|  | 5294 | raw_inode->i_block[2] = 0; | 
|  | 5295 | } | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 5296 | } else | 
|  | 5297 | for (block = 0; block < EXT4_N_BLOCKS; block++) | 
|  | 5298 | raw_inode->i_block[block] = ei->i_data[block]; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5299 |  | 
| Jean Noel Cordenner | 25ec56b | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 5300 | raw_inode->i_disk_version = cpu_to_le32(inode->i_version); | 
|  | 5301 | if (ei->i_extra_isize) { | 
|  | 5302 | if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) | 
|  | 5303 | raw_inode->i_version_hi = | 
|  | 5304 | cpu_to_le32(inode->i_version >> 32); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5305 | raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); | 
| Jean Noel Cordenner | 25ec56b | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 5306 | } | 
|  | 5307 |  | 
| Frank Mayhar | 830156c | 2009-09-29 10:07:47 -0400 | [diff] [blame] | 5308 | BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); | 
| Curt Wohlgemuth | 73b50c1 | 2010-02-16 15:06:29 -0500 | [diff] [blame] | 5309 | rc = ext4_handle_dirty_metadata(handle, NULL, bh); | 
| Frank Mayhar | 830156c | 2009-09-29 10:07:47 -0400 | [diff] [blame] | 5310 | if (!err) | 
|  | 5311 | err = rc; | 
| Theodore Ts'o | 19f5fb7 | 2010-01-24 14:34:07 -0500 | [diff] [blame] | 5312 | ext4_clear_inode_state(inode, EXT4_STATE_NEW); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5313 |  | 
| Jan Kara | b436b9b | 2009-12-08 23:51:10 -0500 | [diff] [blame] | 5314 | ext4_update_inode_fsync_trans(handle, inode, 0); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5315 | out_brelse: | 
| Theodore Ts'o | af5bc92 | 2008-09-08 22:25:24 -0400 | [diff] [blame] | 5316 | brelse(bh); | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5317 | ext4_std_error(inode->i_sb, err); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5318 | return err; | 
|  | 5319 | } | 
|  | 5320 |  | 
|  | 5321 | /* | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5322 | * ext4_write_inode() | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5323 | * | 
|  | 5324 | * We are called from a few places: | 
|  | 5325 | * | 
|  | 5326 | * - Within generic_file_write() for O_SYNC files. | 
|  | 5327 | *   Here, there will be no transaction running. We wait for any running | 
|  | 5328 | *   transaction to commit. | 
|  | 5329 | * | 
|  | 5330 | * - Within sys_sync(), kupdate and such. | 
|  | 5331 | *   We wait on commit, if told to. | 
|  | 5332 | * | 
|  | 5333 | * - Within prune_icache() (PF_MEMALLOC == true) | 
|  | 5334 | *   Here we simply return.  We can't afford to block kswapd on the | 
|  | 5335 | *   journal commit. | 
|  | 5336 | * | 
|  | 5337 | * In all cases it is actually safe for us to return without doing anything, | 
|  | 5338 | * because the inode has been copied into a raw inode buffer in | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5339 | * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5340 | * knfsd. | 
|  | 5341 | * | 
|  | 5342 | * Note that we are absolutely dependent upon all inode dirtiers doing the | 
|  | 5343 | * right thing: they *must* call mark_inode_dirty() after dirtying info in | 
|  | 5344 | * which we are interested. | 
|  | 5345 | * | 
|  | 5346 | * It would be a bug for them to not do this.  The code: | 
|  | 5347 | * | 
|  | 5348 | *	mark_inode_dirty(inode) | 
|  | 5349 | *	stuff(); | 
|  | 5350 | *	inode->i_size = expr; | 
|  | 5351 | * | 
|  | 5352 | * is in error because a kswapd-driven write_inode() could occur while | 
|  | 5353 | * `stuff()' is running, and the new i_size will be lost.  Plus the inode | 
|  | 5354 | * will no longer be on the superblock's dirty inode list. | 
|  | 5355 | */ | 
| Christoph Hellwig | a9185b4 | 2010-03-05 09:21:37 +0100 | [diff] [blame] | 5356 | int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5357 | { | 
| Frank Mayhar | 91ac6f4 | 2009-09-09 22:33:47 -0400 | [diff] [blame] | 5358 | int err; | 
|  | 5359 |  | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5360 | if (current->flags & PF_MEMALLOC) | 
|  | 5361 | return 0; | 
|  | 5362 |  | 
| Frank Mayhar | 91ac6f4 | 2009-09-09 22:33:47 -0400 | [diff] [blame] | 5363 | if (EXT4_SB(inode->i_sb)->s_journal) { | 
|  | 5364 | if (ext4_journal_current_handle()) { | 
|  | 5365 | jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n"); | 
|  | 5366 | dump_stack(); | 
|  | 5367 | return -EIO; | 
|  | 5368 | } | 
|  | 5369 |  | 
| Christoph Hellwig | a9185b4 | 2010-03-05 09:21:37 +0100 | [diff] [blame] | 5370 | if (wbc->sync_mode != WB_SYNC_ALL) | 
| Frank Mayhar | 91ac6f4 | 2009-09-09 22:33:47 -0400 | [diff] [blame] | 5371 | return 0; | 
|  | 5372 |  | 
|  | 5373 | err = ext4_force_commit(inode->i_sb); | 
|  | 5374 | } else { | 
|  | 5375 | struct ext4_iloc iloc; | 
|  | 5376 |  | 
|  | 5377 | err = ext4_get_inode_loc(inode, &iloc); | 
|  | 5378 | if (err) | 
|  | 5379 | return err; | 
| Christoph Hellwig | a9185b4 | 2010-03-05 09:21:37 +0100 | [diff] [blame] | 5380 | if (wbc->sync_mode == WB_SYNC_ALL) | 
| Frank Mayhar | 830156c | 2009-09-29 10:07:47 -0400 | [diff] [blame] | 5381 | sync_dirty_buffer(iloc.bh); | 
|  | 5382 | if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) { | 
| Eric Sandeen | 12062dd | 2010-02-15 14:19:27 -0500 | [diff] [blame] | 5383 | ext4_error(inode->i_sb, "IO error syncing inode, " | 
|  | 5384 | "inode=%lu, block=%llu", inode->i_ino, | 
| Frank Mayhar | 830156c | 2009-09-29 10:07:47 -0400 | [diff] [blame] | 5385 | (unsigned long long)iloc.bh->b_blocknr); | 
|  | 5386 | err = -EIO; | 
|  | 5387 | } | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5388 | } | 
| Frank Mayhar | 91ac6f4 | 2009-09-09 22:33:47 -0400 | [diff] [blame] | 5389 | return err; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5390 | } | 
|  | 5391 |  | 
|  | 5392 | /* | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5393 | * ext4_setattr() | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5394 | * | 
|  | 5395 | * Called from notify_change. | 
|  | 5396 | * | 
|  | 5397 | * We want to trap VFS attempts to truncate the file as soon as | 
|  | 5398 | * possible.  In particular, we want to make sure that when the VFS | 
|  | 5399 | * shrinks i_size, we put the inode on the orphan list and modify | 
|  | 5400 | * i_disksize immediately, so that during the subsequent flushing of | 
|  | 5401 | * dirty pages and freeing of disk blocks, we can guarantee that any | 
|  | 5402 | * commit will leave the blocks being flushed in an unused state on | 
|  | 5403 | * disk.  (On recovery, the inode will get truncated and the blocks will | 
|  | 5404 | * be freed, so we have a strong guarantee that no future commit will | 
|  | 5405 | * leave these blocks visible to the user.) | 
|  | 5406 | * | 
| Jan Kara | 678aaf4 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 5407 | * Another thing we have to assure is that if we are in ordered mode | 
|  | 5408 | * and the inode is still attached to the committing transaction, we must | 
|  | 5409 | * start writeout of all the dirty pages which are being truncated. | 
|  | 5410 | * This way we are sure that all the data written in the previous | 
|  | 5411 | * transaction are already on disk (truncate waits for pages under | 
|  | 5412 | * writeback). | 
|  | 5413 | * | 
|  | 5414 | * Called with inode->i_mutex down. | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5415 | */ | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5416 | int ext4_setattr(struct dentry *dentry, struct iattr *attr) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5417 | { | 
|  | 5418 | struct inode *inode = dentry->d_inode; | 
|  | 5419 | int error, rc = 0; | 
|  | 5420 | const unsigned int ia_valid = attr->ia_valid; | 
|  | 5421 |  | 
|  | 5422 | error = inode_change_ok(inode, attr); | 
|  | 5423 | if (error) | 
|  | 5424 | return error; | 
|  | 5425 |  | 
| Christoph Hellwig | 907f455 | 2010-03-03 09:05:06 -0500 | [diff] [blame] | 5426 | if (ia_valid & ATTR_SIZE) | 
| Christoph Hellwig | 871a293 | 2010-03-03 09:05:07 -0500 | [diff] [blame] | 5427 | dquot_initialize(inode); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5428 | if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || | 
|  | 5429 | (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { | 
|  | 5430 | handle_t *handle; | 
|  | 5431 |  | 
|  | 5432 | /* (user+group)*(old+new) structure, inode write (sb, | 
|  | 5433 | * inode block, ? - but truncate inode update has it) */ | 
| Dmitry Monakhov | 5aca07e | 2009-12-08 22:42:15 -0500 | [diff] [blame] | 5434 | handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+ | 
| Dmitry Monakhov | 194074a | 2009-12-08 22:42:28 -0500 | [diff] [blame] | 5435 | EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5436 | if (IS_ERR(handle)) { | 
|  | 5437 | error = PTR_ERR(handle); | 
|  | 5438 | goto err_out; | 
|  | 5439 | } | 
| Christoph Hellwig | b43fa82 | 2010-03-03 09:05:03 -0500 | [diff] [blame] | 5440 | error = dquot_transfer(inode, attr); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5441 | if (error) { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5442 | ext4_journal_stop(handle); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5443 | return error; | 
|  | 5444 | } | 
|  | 5445 | /* Update corresponding info in inode so that everything is in | 
|  | 5446 | * one transaction */ | 
|  | 5447 | if (attr->ia_valid & ATTR_UID) | 
|  | 5448 | inode->i_uid = attr->ia_uid; | 
|  | 5449 | if (attr->ia_valid & ATTR_GID) | 
|  | 5450 | inode->i_gid = attr->ia_gid; | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5451 | error = ext4_mark_inode_dirty(handle, inode); | 
|  | 5452 | ext4_journal_stop(handle); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5453 | } | 
|  | 5454 |  | 
| Eric Sandeen | e2b4657 | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 5455 | if (attr->ia_valid & ATTR_SIZE) { | 
|  | 5456 | if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) { | 
|  | 5457 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 
|  | 5458 |  | 
|  | 5459 | if (attr->ia_size > sbi->s_bitmap_maxbytes) { | 
|  | 5460 | error = -EFBIG; | 
|  | 5461 | goto err_out; | 
|  | 5462 | } | 
|  | 5463 | } | 
|  | 5464 | } | 
|  | 5465 |  | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5466 | if (S_ISREG(inode->i_mode) && | 
| Jiaying Zhang | c8d46e4 | 2010-02-24 09:52:53 -0500 | [diff] [blame] | 5467 | attr->ia_valid & ATTR_SIZE && | 
|  | 5468 | (attr->ia_size < inode->i_size || | 
|  | 5469 | (EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL))) { | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5470 | handle_t *handle; | 
|  | 5471 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5472 | handle = ext4_journal_start(inode, 3); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5473 | if (IS_ERR(handle)) { | 
|  | 5474 | error = PTR_ERR(handle); | 
|  | 5475 | goto err_out; | 
|  | 5476 | } | 
|  | 5477 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5478 | error = ext4_orphan_add(handle, inode); | 
|  | 5479 | EXT4_I(inode)->i_disksize = attr->ia_size; | 
|  | 5480 | rc = ext4_mark_inode_dirty(handle, inode); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5481 | if (!error) | 
|  | 5482 | error = rc; | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5483 | ext4_journal_stop(handle); | 
| Jan Kara | 678aaf4 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 5484 |  | 
|  | 5485 | if (ext4_should_order_data(inode)) { | 
|  | 5486 | error = ext4_begin_ordered_truncate(inode, | 
|  | 5487 | attr->ia_size); | 
|  | 5488 | if (error) { | 
|  | 5489 | /* Do as much error cleanup as possible */ | 
|  | 5490 | handle = ext4_journal_start(inode, 3); | 
|  | 5491 | if (IS_ERR(handle)) { | 
|  | 5492 | ext4_orphan_del(NULL, inode); | 
|  | 5493 | goto err_out; | 
|  | 5494 | } | 
|  | 5495 | ext4_orphan_del(handle, inode); | 
|  | 5496 | ext4_journal_stop(handle); | 
|  | 5497 | goto err_out; | 
|  | 5498 | } | 
|  | 5499 | } | 
| Jiaying Zhang | c8d46e4 | 2010-02-24 09:52:53 -0500 | [diff] [blame] | 5500 | /* ext4_truncate will clear the flag */ | 
|  | 5501 | if ((EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL)) | 
|  | 5502 | ext4_truncate(inode); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5503 | } | 
|  | 5504 |  | 
|  | 5505 | rc = inode_setattr(inode, attr); | 
|  | 5506 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5507 | /* If inode_setattr's call to ext4_truncate failed to get a | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5508 | * transaction handle at all, we need to clean up the in-core | 
|  | 5509 | * orphan list manually. */ | 
|  | 5510 | if (inode->i_nlink) | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5511 | ext4_orphan_del(NULL, inode); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5512 |  | 
|  | 5513 | if (!rc && (ia_valid & ATTR_MODE)) | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5514 | rc = ext4_acl_chmod(inode); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5515 |  | 
|  | 5516 | err_out: | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5517 | ext4_std_error(inode->i_sb, error); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5518 | if (!error) | 
|  | 5519 | error = rc; | 
|  | 5520 | return error; | 
|  | 5521 | } | 
|  | 5522 |  | 
| Mingming Cao | 3e3398a | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 5523 | int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, | 
|  | 5524 | struct kstat *stat) | 
|  | 5525 | { | 
|  | 5526 | struct inode *inode; | 
|  | 5527 | unsigned long delalloc_blocks; | 
|  | 5528 |  | 
|  | 5529 | inode = dentry->d_inode; | 
|  | 5530 | generic_fillattr(inode, stat); | 
|  | 5531 |  | 
|  | 5532 | /* | 
|  | 5533 | * We can't update i_blocks if the block allocation is delayed; | 
|  | 5534 | * otherwise, in the case of a system crash before the real block | 
|  | 5535 | * allocation is done, we would have i_blocks inconsistent with | 
|  | 5536 | * the on-disk file blocks. | 
|  | 5537 | * We always keep i_blocks updated together with the real | 
|  | 5538 | * allocation. But so as not to confuse userspace, stat | 
|  | 5539 | * will return blocks that include the delayed allocation | 
|  | 5540 | * blocks for this file. | 
|  | 5541 | */ | 
|  | 5542 | spin_lock(&EXT4_I(inode)->i_block_reservation_lock); | 
|  | 5543 | delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks; | 
|  | 5544 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | 
|  | 5545 |  | 
|  | 5546 | stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9; | 
|  | 5547 | return 0; | 
|  | 5548 | } | 
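|  |  | /* | 
|  |  |  * Worked example (illustrative, assuming a 4 KiB block size, i.e. | 
|  |  |  * s_blocksize_bits = 12): each delayed-allocation block adds | 
|  |  |  * (1 << 12) >> 9 = 8 to stat->blocks, so reserved-but-unwritten data is | 
|  |  |  * reported to userspace in the same 512-byte units as allocated blocks. | 
|  |  |  */ | 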
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5549 |  | 
| Mingming Cao | a02908f | 2008-08-19 22:16:07 -0400 | [diff] [blame] | 5550 | static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks, | 
|  | 5551 | int chunk) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5552 | { | 
| Mingming Cao | a02908f | 2008-08-19 22:16:07 -0400 | [diff] [blame] | 5553 | int indirects; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5554 |  | 
| Mingming Cao | a02908f | 2008-08-19 22:16:07 -0400 | [diff] [blame] | 5555 | /* if nrblocks are contiguous */ | 
|  | 5556 | if (chunk) { | 
|  | 5557 | /* | 
|  | 5558 | * With N contiguous data blocks, we need at most | 
|  | 5559 | * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks | 
|  | 5560 | * 2 dindirect blocks | 
|  | 5561 | * 1 tindirect block | 
|  | 5562 | */ | 
|  | 5563 | indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb); | 
|  | 5564 | return indirects + 3; | 
|  | 5565 | } | 
|  | 5566 | /* | 
|  | 5567 | * If nrblocks are not contiguous, then in the worst case each block | 
|  | 5568 | * touches an indirect block, each indirect block touches a double | 
|  | 5569 | * indirect block, plus a triple indirect block | 
|  | 5570 | */ | 
|  | 5571 | indirects = nrblocks * 2 + 1; | 
|  | 5572 | return indirects; | 
|  | 5573 | } | 
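|  |  | /* | 
|  |  |  * Worked example (illustrative, assuming 4 KiB blocks, so | 
|  |  |  * EXT4_ADDR_PER_BLOCK = 1024): a contiguous chunk of 64 blocks costs | 
|  |  |  * 64/1024 + 3 = 3 metadata blocks (the +3 covering the double and | 
|  |  |  * triple indirect blocks), while 64 discontiguous blocks are charged | 
|  |  |  * 64 * 2 + 1 = 129 in the worst case. | 
|  |  |  */ | 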
| Alex Tomas | a86c618 | 2006-10-11 01:21:03 -0700 | [diff] [blame] | 5574 |  | 
| Mingming Cao | a02908f | 2008-08-19 22:16:07 -0400 | [diff] [blame] | 5575 | static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) | 
|  | 5576 | { | 
|  | 5577 | if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) | 
| Theodore Ts'o | ac51d83 | 2008-11-06 16:49:36 -0500 | [diff] [blame] | 5578 | return ext4_indirect_trans_blocks(inode, nrblocks, chunk); | 
|  | 5579 | return ext4_ext_index_trans_blocks(inode, nrblocks, chunk); | 
| Mingming Cao | a02908f | 2008-08-19 22:16:07 -0400 | [diff] [blame] | 5580 | } | 
| Theodore Ts'o | ac51d83 | 2008-11-06 16:49:36 -0500 | [diff] [blame] | 5581 |  | 
| Mingming Cao | a02908f | 2008-08-19 22:16:07 -0400 | [diff] [blame] | 5582 | /* | 
|  | 5583 | * Account for index blocks, block groups bitmaps and block group | 
|  | 5584 | * descriptor blocks if modify datablocks and index blocks | 
|  | 5585 | * worse case, the indexs blocks spread over different block groups | 
|  | 5586 | * | 
|  | 5587 | * If datablocks are discontiguous, they are possible to spread over | 
| André Goddard Rosa | af901ca | 2009-11-14 13:09:05 -0200 | [diff] [blame] | 5588 | * different block groups too. If they are contiuguous, with flexbg, | 
| Mingming Cao | a02908f | 2008-08-19 22:16:07 -0400 | [diff] [blame] | 5589 | * they could still across block group boundary. | 
|  | 5590 | * | 
|  | 5591 | * Also account for superblock, inode, quota and xattr blocks | 
|  | 5592 | */ | 
|  | 5593 | int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk) | 
|  | 5594 | { | 
| Theodore Ts'o | 8df9675 | 2009-05-01 08:50:38 -0400 | [diff] [blame] | 5595 | ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb); | 
|  | 5596 | int gdpblocks; | 
| Mingming Cao | a02908f | 2008-08-19 22:16:07 -0400 | [diff] [blame] | 5597 | int idxblocks; | 
|  | 5598 | int ret = 0; | 
|  | 5599 |  | 
|  | 5600 | /* | 
|  | 5601 | * How many index blocks do we need to touch to modify nrblocks? | 
|  | 5602 | * The "Chunk" flag indicates whether nrblocks is | 
|  | 5603 | * physically contiguous on disk. | 
|  | 5604 | * | 
|  | 5605 | * Direct IO and fallocate call get_block to allocate | 
|  | 5606 | * a single extent at a time, so they can set the "Chunk" flag. | 
|  | 5607 | */ | 
|  | 5608 | idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk); | 
|  | 5609 |  | 
|  | 5610 | ret = idxblocks; | 
|  | 5611 |  | 
|  | 5612 | /* | 
|  | 5613 | * Now let's see how many group bitmaps and group descriptors need | 
|  | 5614 | * to be accounted for. | 
|  | 5615 | */ | 
|  | 5616 | groups = idxblocks; | 
|  | 5617 | if (chunk) | 
|  | 5618 | groups += 1; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5619 | else | 
| Mingming Cao | a02908f | 2008-08-19 22:16:07 -0400 | [diff] [blame] | 5620 | groups += nrblocks; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5621 |  | 
| Mingming Cao | a02908f | 2008-08-19 22:16:07 -0400 | [diff] [blame] | 5622 | gdpblocks = groups; | 
| Theodore Ts'o | 8df9675 | 2009-05-01 08:50:38 -0400 | [diff] [blame] | 5623 | if (groups > ngroups) | 
|  | 5624 | groups = ngroups; | 
| Mingming Cao | a02908f | 2008-08-19 22:16:07 -0400 | [diff] [blame] | 5625 | if (groups > EXT4_SB(inode->i_sb)->s_gdb_count) | 
|  | 5626 | gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count; | 
|  | 5627 |  | 
|  | 5628 | /* bitmaps and block group descriptor blocks */ | 
|  | 5629 | ret += groups + gdpblocks; | 
|  | 5630 |  | 
|  | 5631 | /* Blocks for super block, inode, quota and xattr blocks */ | 
|  | 5632 | ret += EXT4_META_TRANS_BLOCKS(inode->i_sb); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5633 |  | 
|  | 5634 | return ret; | 
|  | 5635 | } | 
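|  |  | /* | 
|  |  |  * Worked example (illustrative, not part of the original source): for a | 
|  |  |  * single contiguous chunk needing idxblocks = 3 index blocks on a | 
|  |  |  * filesystem with at least four block groups, groups = 3 + 1 = 4, so the | 
|  |  |  * estimate is 3 (index) + 4 (bitmaps) + min(4, s_gdb_count) (group | 
|  |  |  * descriptors) + EXT4_META_TRANS_BLOCKS (sb, inode, quota, xattr). | 
|  |  |  */ | 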
|  | 5636 |  | 
|  | 5637 | /* | 
| Mingming Cao | a02908f | 2008-08-19 22:16:07 -0400 | [diff] [blame] | 5638 | * Calculate the total number of credits to reserve to fit | 
| Mingming Cao | f3bd1f3 | 2008-08-19 22:16:03 -0400 | [diff] [blame] | 5639 | * the modification of a single page into a single transaction, | 
|  | 5640 | * which may include multiple chunks of block allocations. | 
| Mingming Cao | a02908f | 2008-08-19 22:16:07 -0400 | [diff] [blame] | 5641 | * | 
| Mingming Cao | 525f4ed | 2008-08-19 22:15:58 -0400 | [diff] [blame] | 5642 | * This could be called via ext4_write_begin() | 
| Mingming Cao | a02908f | 2008-08-19 22:16:07 -0400 | [diff] [blame] | 5643 | * | 
| Mingming Cao | 525f4ed | 2008-08-19 22:15:58 -0400 | [diff] [blame] | 5644 | * We need to consider the worst case, when | 
| Mingming Cao | a02908f | 2008-08-19 22:16:07 -0400 | [diff] [blame] | 5645 | * we allocate one new block per extent. | 
| Mingming Cao | a02908f | 2008-08-19 22:16:07 -0400 | [diff] [blame] | 5646 | */ | 
|  | 5647 | int ext4_writepage_trans_blocks(struct inode *inode) | 
|  | 5648 | { | 
|  | 5649 | int bpp = ext4_journal_blocks_per_page(inode); | 
|  | 5650 | int ret; | 
|  | 5651 |  | 
|  | 5652 | ret = ext4_meta_trans_blocks(inode, bpp, 0); | 
|  | 5653 |  | 
|  | 5654 | /* Account for data blocks for journalled mode */ | 
|  | 5655 | if (ext4_should_journal_data(inode)) | 
|  | 5656 | ret += bpp; | 
|  | 5657 | return ret; | 
|  | 5658 | } | 
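|  |  | /* | 
|  |  |  * Worked example (illustrative, assuming a 4 KiB page and 1 KiB blocks, | 
|  |  |  * so bpp = 4): the reservation is ext4_meta_trans_blocks(inode, 4, 0) | 
|  |  |  * credits, plus 4 extra credits for the data blocks themselves when | 
|  |  |  * the inode is in data=journal mode. | 
|  |  |  */ | 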
| Mingming Cao | f3bd1f3 | 2008-08-19 22:16:03 -0400 | [diff] [blame] | 5659 |  | 
|  | 5660 | /* | 
|  | 5661 | * Calculate the journal credits for a chunk of data modification. | 
|  | 5662 | * | 
|  | 5663 | * This is called from DIO, fallocate or anything else calling | 
| André Goddard Rosa | af901ca | 2009-11-14 13:09:05 -0200 | [diff] [blame] | 5664 | * ext4_get_blocks() to map/allocate a chunk of contiguous disk blocks. | 
| Mingming Cao | f3bd1f3 | 2008-08-19 22:16:03 -0400 | [diff] [blame] | 5665 | * | 
|  | 5666 | * Journal buffers for data blocks are not included here, as DIO | 
|  | 5667 | * and fallocate do not need to journal data buffers. | 
|  | 5668 | */ | 
|  | 5669 | int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks) | 
|  | 5670 | { | 
|  | 5671 | return ext4_meta_trans_blocks(inode, nrblocks, 1); | 
|  | 5672 | } | 
|  | 5673 |  | 
| Mingming Cao | a02908f | 2008-08-19 22:16:07 -0400 | [diff] [blame] | 5674 | /* | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5675 | * The caller must have previously called ext4_reserve_inode_write(). | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5676 | * Given this, we know that the caller already has write access to iloc->bh. | 
|  | 5677 | */ | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5678 | int ext4_mark_iloc_dirty(handle_t *handle, | 
| Theodore Ts'o | de9a55b | 2009-06-14 17:45:34 -0400 | [diff] [blame] | 5679 | struct inode *inode, struct ext4_iloc *iloc) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5680 | { | 
|  | 5681 | int err = 0; | 
|  | 5682 |  | 
| Jean Noel Cordenner | 25ec56b | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 5683 | if (test_opt(inode->i_sb, I_VERSION)) | 
|  | 5684 | inode_inc_iversion(inode); | 
|  | 5685 |  | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5686 | /* the do_update_inode consumes one bh->b_count */ | 
|  | 5687 | get_bh(iloc->bh); | 
|  | 5688 |  | 
| Mingming Cao | dab291a | 2006-10-11 01:21:01 -0700 | [diff] [blame] | 5689 | /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */ | 
| Frank Mayhar | 830156c | 2009-09-29 10:07:47 -0400 | [diff] [blame] | 5690 | err = ext4_do_update_inode(handle, inode, iloc); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5691 | put_bh(iloc->bh); | 
|  | 5692 | return err; | 
|  | 5693 | } | 
|  | 5694 |  | 
|  | 5695 | /* | 
|  | 5696 | * On success, we end up with an outstanding reference count against | 
|  | 5697 | * iloc->bh.  This _must_ be cleaned up later. | 
|  | 5698 | */ | 
|  | 5699 |  | 
|  | 5700 | int | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5701 | ext4_reserve_inode_write(handle_t *handle, struct inode *inode, | 
|  | 5702 | struct ext4_iloc *iloc) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5703 | { | 
| Frank Mayhar | 0390131 | 2009-01-07 00:06:22 -0500 | [diff] [blame] | 5704 | int err; | 
|  | 5705 |  | 
|  | 5706 | err = ext4_get_inode_loc(inode, iloc); | 
|  | 5707 | if (!err) { | 
|  | 5708 | BUFFER_TRACE(iloc->bh, "get_write_access"); | 
|  | 5709 | err = ext4_journal_get_write_access(handle, iloc->bh); | 
|  | 5710 | if (err) { | 
|  | 5711 | brelse(iloc->bh); | 
|  | 5712 | iloc->bh = NULL; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5713 | } | 
|  | 5714 | } | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5715 | ext4_std_error(inode->i_sb, err); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5716 | return err; | 
|  | 5717 | } | 
|  | 5718 |  | 
|  | 5719 | /* | 
| Kalpak Shah | 6dd4ee7 | 2007-07-18 09:19:57 -0400 | [diff] [blame] | 5720 | * Expand an inode by new_extra_isize bytes. | 
|  | 5721 | * Returns 0 on success or negative error number on failure. | 
|  | 5722 | */ | 
| Aneesh Kumar K.V | 1d03ec9 | 2008-01-28 23:58:27 -0500 | [diff] [blame] | 5723 | static int ext4_expand_extra_isize(struct inode *inode, | 
|  | 5724 | unsigned int new_extra_isize, | 
|  | 5725 | struct ext4_iloc iloc, | 
|  | 5726 | handle_t *handle) | 
| Kalpak Shah | 6dd4ee7 | 2007-07-18 09:19:57 -0400 | [diff] [blame] | 5727 | { | 
|  | 5728 | struct ext4_inode *raw_inode; | 
|  | 5729 | struct ext4_xattr_ibody_header *header; | 
|  | 5730 | struct ext4_xattr_entry *entry; | 
|  | 5731 |  | 
|  | 5732 | if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) | 
|  | 5733 | return 0; | 
|  | 5734 |  | 
|  | 5735 | raw_inode = ext4_raw_inode(&iloc); | 
|  | 5736 |  | 
|  | 5737 | header = IHDR(inode, raw_inode); | 
|  | 5738 | entry = IFIRST(header); | 
|  | 5739 |  | 
|  | 5740 | /* No extended attributes present */ | 
| Theodore Ts'o | 19f5fb7 | 2010-01-24 14:34:07 -0500 | [diff] [blame] | 5741 | if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) || | 
|  | 5742 | header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) { | 
| Kalpak Shah | 6dd4ee7 | 2007-07-18 09:19:57 -0400 | [diff] [blame] | 5743 | memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0, | 
|  | 5744 | new_extra_isize); | 
|  | 5745 | EXT4_I(inode)->i_extra_isize = new_extra_isize; | 
|  | 5746 | return 0; | 
|  | 5747 | } | 
|  | 5748 |  | 
|  | 5749 | /* try to expand with EAs present */ | 
|  | 5750 | return ext4_expand_extra_isize_ea(inode, new_extra_isize, | 
|  | 5751 | raw_inode, handle); | 
|  | 5752 | } | 
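|  |  | /* | 
|  |  |  * Background note (hedged summary, not from the original source): | 
|  |  |  * i_extra_isize records how many bytes past the 128-byte | 
|  |  |  * EXT4_GOOD_OLD_INODE_SIZE this inode uses for extended fields such as | 
|  |  |  * the high timestamp bits.  When no in-inode xattrs are present the new | 
|  |  |  * space can simply be zeroed; otherwise ext4_expand_extra_isize_ea() | 
|  |  |  * must shift the xattr entries to make room. | 
|  |  |  */ | 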
|  | 5753 |  | 
|  | 5754 | /* | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5755 | * What we do here is to mark the in-core inode as clean with respect to inode | 
|  | 5756 | * dirtiness (it may still be data-dirty). | 
|  | 5757 | * This means that the in-core inode may be reaped by prune_icache | 
|  | 5758 | * without having to perform any I/O.  This is a very good thing, | 
|  | 5759 | * because *any* task may call prune_icache - even ones which | 
|  | 5760 | * have a transaction open against a different journal. | 
|  | 5761 | * | 
|  | 5762 | * Is this cheating?  Not really.  Sure, we haven't written the | 
|  | 5763 | * inode out, but prune_icache isn't a user-visible syncing function. | 
|  | 5764 | * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) | 
|  | 5765 | * we start and wait on commits. | 
|  | 5766 | * | 
|  | 5767 | * Is this efficient/effective?  Well, we're being nice to the system | 
|  | 5768 | * by cleaning up our inodes proactively so they can be reaped | 
|  | 5769 | * without I/O.  But we are potentially leaving up to five seconds' | 
|  | 5770 | * worth of inodes floating about which prune_icache wants us to | 
|  | 5771 | * write out.  One way to fix that would be to get prune_icache() | 
|  | 5772 | * to do a write_super() to free up some memory.  It has the desired | 
|  | 5773 | * effect. | 
|  | 5774 | */ | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5775 | int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5776 | { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5777 | struct ext4_iloc iloc; | 
| Kalpak Shah | 6dd4ee7 | 2007-07-18 09:19:57 -0400 | [diff] [blame] | 5778 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 
|  | 5779 | static unsigned int mnt_count; | 
|  | 5780 | int err, ret; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5781 |  | 
|  | 5782 | might_sleep(); | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5783 | err = ext4_reserve_inode_write(handle, inode, &iloc); | 
| Frank Mayhar | 0390131 | 2009-01-07 00:06:22 -0500 | [diff] [blame] | 5784 | if (ext4_handle_valid(handle) && | 
|  | 5785 | EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize && | 
| Theodore Ts'o | 19f5fb7 | 2010-01-24 14:34:07 -0500 | [diff] [blame] | 5786 | !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) { | 
| Kalpak Shah | 6dd4ee7 | 2007-07-18 09:19:57 -0400 | [diff] [blame] | 5787 | /* | 
|  | 5788 | * We need extra buffer credits since we may write into EA block | 
|  | 5789 | * with this same handle. If journal_extend fails, then it will | 
|  | 5790 | * only result in a minor loss of functionality for that inode. | 
|  | 5791 | * If this is felt to be critical, then e2fsck should be run to | 
|  | 5792 | * force a large enough s_min_extra_isize. | 
|  | 5793 | */ | 
|  | 5794 | if ((jbd2_journal_extend(handle, | 
|  | 5795 | EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) { | 
|  | 5796 | ret = ext4_expand_extra_isize(inode, | 
|  | 5797 | sbi->s_want_extra_isize, | 
|  | 5798 | iloc, handle); | 
|  | 5799 | if (ret) { | 
| Theodore Ts'o | 19f5fb7 | 2010-01-24 14:34:07 -0500 | [diff] [blame] | 5800 | ext4_set_inode_state(inode, | 
|  | 5801 | EXT4_STATE_NO_EXPAND); | 
| Aneesh Kumar K.V | c1bddad | 2007-10-16 18:38:25 -0400 | [diff] [blame] | 5802 | if (mnt_count != | 
|  | 5803 | le16_to_cpu(sbi->s_es->s_mnt_count)) { | 
| Eric Sandeen | 12062dd | 2010-02-15 14:19:27 -0500 | [diff] [blame] | 5804 | ext4_warning(inode->i_sb, | 
| Kalpak Shah | 6dd4ee7 | 2007-07-18 09:19:57 -0400 | [diff] [blame] | 5805 | "Unable to expand inode %lu. Delete" | 
|  | 5806 | " some EAs or run e2fsck.", | 
|  | 5807 | inode->i_ino); | 
| Aneesh Kumar K.V | c1bddad | 2007-10-16 18:38:25 -0400 | [diff] [blame] | 5808 | mnt_count = | 
|  | 5809 | le16_to_cpu(sbi->s_es->s_mnt_count); | 
| Kalpak Shah | 6dd4ee7 | 2007-07-18 09:19:57 -0400 | [diff] [blame] | 5810 | } | 
|  | 5811 | } | 
|  | 5812 | } | 
|  | 5813 | } | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5814 | if (!err) | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5815 | err = ext4_mark_iloc_dirty(handle, inode, &iloc); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5816 | return err; | 
|  | 5817 | } | 
|  | 5818 |  | 
|  | 5819 | /* | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5820 | * ext4_dirty_inode() is called from __mark_inode_dirty() | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5821 | * | 
|  | 5822 | * We're really interested in the case where a file is being extended. | 
|  | 5823 | * i_size has been changed by generic_commit_write() and we thus need | 
|  | 5824 | * to include the updated inode in the current transaction. | 
|  | 5825 | * | 
| Christoph Hellwig | 5dd4056 | 2010-03-03 09:05:00 -0500 | [diff] [blame] | 5826 | * Also, dquot_alloc_block() will always dirty the inode when blocks | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5827 | * are allocated to the file. | 
|  | 5828 | * | 
|  | 5829 | * If the inode is marked synchronous, we don't honour that here - doing | 
|  | 5830 | * so would cause a commit on atime updates, which we don't bother doing. | 
|  | 5831 | * We handle synchronous inodes at the highest possible level. | 
|  | 5832 | */ | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5833 | void ext4_dirty_inode(struct inode *inode) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5834 | { | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5835 | handle_t *handle; | 
|  | 5836 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5837 | handle = ext4_journal_start(inode, 2); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5838 | if (IS_ERR(handle)) | 
|  | 5839 | goto out; | 
| Curt Wohlgemuth | f3dc272 | 2009-09-29 16:06:01 -0400 | [diff] [blame] | 5840 |  | 
| Curt Wohlgemuth | f3dc272 | 2009-09-29 16:06:01 -0400 | [diff] [blame] | 5841 | ext4_mark_inode_dirty(handle, inode); | 
|  | 5842 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5843 | ext4_journal_stop(handle); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5844 | out: | 
|  | 5845 | return; | 
|  | 5846 | } | 
|  | 5847 |  | 
|  | 5848 | #if 0 | 
|  | 5849 | /* | 
|  | 5850 | * Bind an inode's backing buffer_head into this transaction, to prevent | 
|  | 5851 | * it from being flushed to disk early.  Unlike | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5852 | * ext4_reserve_inode_write, this leaves behind no bh reference and | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5853 | * returns no iloc structure, so the caller needs to repeat the iloc | 
|  | 5854 | * lookup to mark the inode dirty later. | 
|  | 5855 | */ | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5856 | static int ext4_pin_inode(handle_t *handle, struct inode *inode) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5857 | { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5858 | struct ext4_iloc iloc; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5859 |  | 
|  | 5860 | int err = 0; | 
|  | 5861 | if (handle) { | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5862 | err = ext4_get_inode_loc(inode, &iloc); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5863 | if (!err) { | 
|  | 5864 | BUFFER_TRACE(iloc.bh, "get_write_access"); | 
| Mingming Cao | dab291a | 2006-10-11 01:21:01 -0700 | [diff] [blame] | 5865 | err = jbd2_journal_get_write_access(handle, iloc.bh); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5866 | if (!err) | 
| Frank Mayhar | 0390131 | 2009-01-07 00:06:22 -0500 | [diff] [blame] | 5867 | err = ext4_handle_dirty_metadata(handle, | 
| Curt Wohlgemuth | 73b50c1 | 2010-02-16 15:06:29 -0500 | [diff] [blame] | 5868 | NULL, | 
| Frank Mayhar | 0390131 | 2009-01-07 00:06:22 -0500 | [diff] [blame] | 5869 | iloc.bh); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5870 | brelse(iloc.bh); | 
|  | 5871 | } | 
|  | 5872 | } | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5873 | ext4_std_error(inode->i_sb, err); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5874 | return err; | 
|  | 5875 | } | 
|  | 5876 | #endif | 
|  | 5877 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5878 | int ext4_change_inode_journal_flag(struct inode *inode, int val) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5879 | { | 
|  | 5880 | journal_t *journal; | 
|  | 5881 | handle_t *handle; | 
|  | 5882 | int err; | 
|  | 5883 |  | 
|  | 5884 | /* | 
|  | 5885 | * We have to be very careful here: changing a data block's | 
|  | 5886 | * journaling status dynamically is dangerous.  If we write a | 
|  | 5887 | * data block to the journal, change the status and then delete | 
|  | 5888 | * that block, we risk forgetting to revoke the old log record | 
|  | 5889 | * from the journal and so a subsequent replay can corrupt data. | 
|  | 5890 | * So, first we make sure that the journal is empty and that | 
|  | 5891 | * nobody is changing anything. | 
|  | 5892 | */ | 
|  | 5893 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5894 | journal = EXT4_JOURNAL(inode); | 
| Frank Mayhar | 0390131 | 2009-01-07 00:06:22 -0500 | [diff] [blame] | 5895 | if (!journal) | 
|  | 5896 | return 0; | 
| Dave Hansen | d699594 | 2007-07-18 08:33:51 -0400 | [diff] [blame] | 5897 | if (is_journal_aborted(journal)) | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5898 | return -EROFS; | 
|  | 5899 |  | 
| Mingming Cao | dab291a | 2006-10-11 01:21:01 -0700 | [diff] [blame] | 5900 | jbd2_journal_lock_updates(journal); | 
|  | 5901 | jbd2_journal_flush(journal); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5902 |  | 
|  | 5903 | /* | 
|  | 5904 | * OK, there are no updates running now, and all cached data is | 
|  | 5905 | * synced to disk.  We are now in a completely consistent state | 
|  | 5906 | * which doesn't have anything in the journal, and we know that | 
|  | 5907 | * no filesystem updates are running, so it is safe to modify | 
|  | 5908 | * the inode's in-core data-journaling state flag now. | 
|  | 5909 | */ | 
|  | 5910 |  | 
|  | 5911 | if (val) | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5912 | EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL; | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5913 | else | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5914 | EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL; | 
|  | 5915 | ext4_set_aops(inode); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5916 |  | 
| Mingming Cao | dab291a | 2006-10-11 01:21:01 -0700 | [diff] [blame] | 5917 | jbd2_journal_unlock_updates(journal); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5918 |  | 
|  | 5919 | /* Finally we can mark the inode as dirty. */ | 
|  | 5920 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5921 | handle = ext4_journal_start(inode, 1); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5922 | if (IS_ERR(handle)) | 
|  | 5923 | return PTR_ERR(handle); | 
|  | 5924 |  | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5925 | err = ext4_mark_inode_dirty(handle, inode); | 
| Frank Mayhar | 0390131 | 2009-01-07 00:06:22 -0500 | [diff] [blame] | 5926 | ext4_handle_sync(handle); | 
| Mingming Cao | 617ba13 | 2006-10-11 01:20:53 -0700 | [diff] [blame] | 5927 | ext4_journal_stop(handle); | 
|  | 5928 | ext4_std_error(inode->i_sb, err); | 
| Dave Kleikamp | ac27a0e | 2006-10-11 01:20:50 -0700 | [diff] [blame] | 5929 |  | 
|  | 5930 | return err; | 
|  | 5931 | } | 
| Aneesh Kumar K.V | 2e9ee85 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 5932 |  | 
|  | 5933 | static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh) | 
|  | 5934 | { | 
|  | 5935 | return !buffer_mapped(bh); | 
|  | 5936 | } | 
|  | 5937 |  | 
| Nick Piggin | c2ec175 | 2009-03-31 15:23:21 -0700 | [diff] [blame] | 5938 | int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | 
| Aneesh Kumar K.V | 2e9ee85 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 5939 | { | 
| Nick Piggin | c2ec175 | 2009-03-31 15:23:21 -0700 | [diff] [blame] | 5940 | struct page *page = vmf->page; | 
| Aneesh Kumar K.V | 2e9ee85 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 5941 | loff_t size; | 
|  | 5942 | unsigned long len; | 
|  | 5943 | int ret = -EINVAL; | 
| Aneesh Kumar K.V | 79f0be8 | 2008-10-08 23:13:30 -0400 | [diff] [blame] | 5944 | void *fsdata; | 
| Aneesh Kumar K.V | 2e9ee85 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 5945 | struct file *file = vma->vm_file; | 
|  | 5946 | struct inode *inode = file->f_path.dentry->d_inode; | 
|  | 5947 | struct address_space *mapping = inode->i_mapping; | 
|  | 5948 |  | 
|  | 5949 | /* | 
|  | 5950 | * Get i_alloc_sem to stop truncates messing with the inode. We cannot | 
|  | 5951 | * get i_mutex because we are already holding mmap_sem. | 
|  | 5952 | */ | 
|  | 5953 | down_read(&inode->i_alloc_sem); | 
|  | 5954 | size = i_size_read(inode); | 
|  | 5955 | if (page->mapping != mapping || size <= page_offset(page) | 
|  | 5956 | || !PageUptodate(page)) { | 
|  | 5957 | /* page got truncated from under us? */ | 
|  | 5958 | goto out_unlock; | 
|  | 5959 | } | 
|  | 5960 | ret = 0; | 
|  | 5961 | if (PageMappedToDisk(page)) | 
|  | 5962 | goto out_unlock; | 
|  | 5963 |  | 
|  | 5964 | if (page->index == size >> PAGE_CACHE_SHIFT) | 
|  | 5965 | len = size & ~PAGE_CACHE_MASK; | 
|  | 5966 | else | 
|  | 5967 | len = PAGE_CACHE_SIZE; | 
|  | 5968 |  | 
| Aneesh Kumar K.V | a827eaf | 2009-09-09 22:36:03 -0400 | [diff] [blame] | 5969 | lock_page(page); | 
|  | 5970 | /* | 
|  | 5971 | * Return if we have all the buffers mapped. This avoids | 
|  | 5972 | * the need to call write_begin/write_end, which does a | 
|  | 5973 | * journal_start/journal_stop that can block and take a | 
|  | 5974 | * long time. | 
|  | 5975 | */ | 
| Aneesh Kumar K.V | 2e9ee85 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 5976 | if (page_has_buffers(page)) { | 
| Aneesh Kumar K.V | 2e9ee85 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 5977 | if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL, | 
| Aneesh Kumar K.V | a827eaf | 2009-09-09 22:36:03 -0400 | [diff] [blame] | 5978 | ext4_bh_unmapped)) { | 
|  | 5979 | unlock_page(page); | 
| Aneesh Kumar K.V | 2e9ee85 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 5980 | goto out_unlock; | 
| Aneesh Kumar K.V | a827eaf | 2009-09-09 22:36:03 -0400 | [diff] [blame] | 5981 | } | 
| Aneesh Kumar K.V | 2e9ee85 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 5982 | } | 
| Aneesh Kumar K.V | a827eaf | 2009-09-09 22:36:03 -0400 | [diff] [blame] | 5983 | unlock_page(page); | 
| Aneesh Kumar K.V | 2e9ee85 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 5984 | /* | 
|  | 5985 | * OK, we need to fill the hole... Do write_begin/write_end | 
|  | 5986 | * to do block allocation/reservation. We are not holding | 
|  | 5987 | * inode->i_mutex here; that allows parallel write_begin and | 
|  | 5988 | * write_end calls. lock_page prevents this from happening | 
|  | 5989 | * on the same page, though. | 
|  | 5990 | */ | 
|  | 5991 | ret = mapping->a_ops->write_begin(file, mapping, page_offset(page), | 
| Aneesh Kumar K.V | 79f0be8 | 2008-10-08 23:13:30 -0400 | [diff] [blame] | 5992 | len, AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata); | 
| Aneesh Kumar K.V | 2e9ee85 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 5993 | if (ret < 0) | 
|  | 5994 | goto out_unlock; | 
|  | 5995 | ret = mapping->a_ops->write_end(file, mapping, page_offset(page), | 
| Aneesh Kumar K.V | 79f0be8 | 2008-10-08 23:13:30 -0400 | [diff] [blame] | 5996 | len, len, page, fsdata); | 
| Aneesh Kumar K.V | 2e9ee85 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 5997 | if (ret < 0) | 
|  | 5998 | goto out_unlock; | 
|  | 5999 | ret = 0; | 
|  | 6000 | out_unlock: | 
| Nick Piggin | c2ec175 | 2009-03-31 15:23:21 -0700 | [diff] [blame] | 6001 | if (ret) | 
|  | 6002 | ret = VM_FAULT_SIGBUS; | 
| Aneesh Kumar K.V | 2e9ee85 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 6003 | up_read(&inode->i_alloc_sem); | 
|  | 6004 | return ret; | 
|  | 6005 | } |