/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_acl.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_btree_trace.h"
#include "xfs_trace.h"


/*
 * Define xfs inode iolock lockdep classes. We need to ensure that all active
 * inodes are considered the same for lockdep purposes, including inodes that
 * are recycled through the XFS_IRECLAIMABLE state. This is the only way to
 * guarantee the locks are considered the same when there are multiple lock
 * initialisation sites. Also, define a reclaimable inode class so it is
 * obvious in lockdep reports which class the report is against.
 */
static struct lock_class_key xfs_iolock_active;
struct lock_class_key xfs_iolock_reclaimable;

/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

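	/*
	 * The inode zone can hand back memory that has been used before
	 * (it is set up with a constructor), so verify that the previous
	 * holder restored the fields it was required to reset before the
	 * inode was freed back to the zone.
	 */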
	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));
	ASSERT(ip->i_ino == 0);

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
			&xfs_iolock_active, "xfs_iolock_active");

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_update_core = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
	ip->i_size = 0;
	ip->i_new_size = 0;

	return ip;
}

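/*
 * RCU callback that actually frees the inode memory: by the time this
 * runs, a grace period has expired, so no RCU-protected lookup can
 * still be dereferencing this inode.
 */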
STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_zone_free(xfs_inode_zone, ip);
}

void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	if (ip->i_itemp) {
		/*
		 * Only if we are shutting down the fs will we see an
		 * inode still in the AIL. If it is there, we should remove
		 * it to prevent a use-after-free from occurring.
		 */
		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
		struct xfs_ail	*ailp = lip->li_ailp;

		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
			XFS_FORCED_SHUTDOWN(ip->i_mount));
		if (lip->li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
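			/*
			 * Recheck under the AIL lock; if the item is still
			 * there, xfs_trans_ail_delete() drops xa_lock for us.
			 */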
			if (lip->li_flags & XFS_LI_IN_AIL)
				xfs_trans_ail_delete(ailp, lip);
			else
				spin_unlock(&ailp->xa_lock);
		}
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

/*
 * Check the validity of the inode we just found in the cache
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	     wait_on_inode to wait for these flags to be cleared
	 *	     instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}

	/*
	 * If lookup is racing with unlink return an error immediately.
	 */
	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_error;
	}

	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode.  We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		error = -inode_init_always(mp->m_super, inode);
		if (error) {
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			rcu_read_lock();
			spin_lock(&ip->i_flags_lock);

			ip->i_flags &= ~XFS_INEW;
			ip->i_flags |= XFS_IRECLAIMABLE;
			__xfs_inode_set_reclaim_tag(pag, ip);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}

		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);
		ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM);
		ip->i_flags |= XFS_INEW;
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
		inode->i_state = I_NEW;

		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
		lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
				&xfs_iolock_active, "xfs_iolock_active");

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE);
	XFS_STATS_INC(xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}

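/*
 * The inode was not in the cache: allocate a new xfs_inode, read the
 * on-disk inode into it, and insert it into the per-AG radix tree so
 * that subsequent lookups find it.
 */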
static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	trace_xfs_iget_miss(ip);

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region.
	 */
	if (radix_tree_preload(GFP_KERNEL)) {
		error = EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	spin_lock(&pag->pag_ici_lock);

	/* insert the new inode */
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}

	/* These values _must_ be set before releasing the radix tree lock! */
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, XFS_INEW);

	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the per-AG inode caches.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

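	/*
	 * The radix tree lookup runs under rcu_read_lock(): on a hit,
	 * xfs_iget_cache_hit() drops the RCU read lock itself; on a miss
	 * we drop it here before reading the inode in from disk.
	 */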
again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	ASSERT(ip->i_df.if_ext_max ==
	       XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));
	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.	 If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
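	/*
	 * EAGAIN means we raced with reclaim or another initialisation of
	 * this inode; back off for a tick and retry the lookup from the top.
	 */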
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}

/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}

/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}
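
/*
 * Typical usage of the pair above (a sketch, not taken from a
 * particular caller):
 *
 *	lock_mode = xfs_ilock_map_shared(ip);
 *	... read the extent list ...
 *	xfs_iunlock_map_shared(ip, lock_mode);
 */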

/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));

	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
}
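
/*
 * For example (sketch), a caller that needs both locks exclusively
 * would take and release them like so:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */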

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
	return 1;

out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
out:
	return 0;
}

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 *
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY |
			XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) &&
	    !(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) {
		/*
		 * Let the AIL know that this item has been unlocked in case
		 * it is in the AIL and anyone is waiting on it.  Don't do
		 * this if the caller has asked us not to.
		 */
		xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp,
					(xfs_log_item_t*)(ip->i_itemp));
	}
	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

/*
 * give up write locks.  the i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}

#ifdef DEBUG
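/*
 * Debug-only predicate used by ASSERT()s to check that the required
 * inode locks are held.  For exclusive locks we can check mr_writer;
 * for shared locks the best we can do is ask whether the rwsem is
 * locked at all.
 */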
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !!ip->i_iolock.mr_writer;
		return rwsem_is_locked(&ip->i_iolock.mr_lock);
	}

	ASSERT(0);
	return 0;
}
#endif