/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_fsops.h"
#include "xfs_utils.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_cksum.h"
#include "xfs_buf_item.h"

#ifdef HAVE_PERCPU_SB
STATIC void	xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
						int);
STATIC void	xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t,
						int);
STATIC void	xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
#else

#define xfs_icsb_balance_counter(mp, a, b)		do { } while (0)
#define xfs_icsb_balance_counter_locked(mp, a, b)	do { } while (0)
#endif

static const struct {
	short offset;
	short type;	/* 0 = integer
			 * 1 = binary / string (no translation)
			 */
} xfs_sb_info[] = {
	{ offsetof(xfs_sb_t, sb_magicnum),	0 },
	{ offsetof(xfs_sb_t, sb_blocksize),	0 },
	{ offsetof(xfs_sb_t, sb_dblocks),	0 },
	{ offsetof(xfs_sb_t, sb_rblocks),	0 },
	{ offsetof(xfs_sb_t, sb_rextents),	0 },
	{ offsetof(xfs_sb_t, sb_uuid),		1 },
	{ offsetof(xfs_sb_t, sb_logstart),	0 },
	{ offsetof(xfs_sb_t, sb_rootino),	0 },
	{ offsetof(xfs_sb_t, sb_rbmino),	0 },
	{ offsetof(xfs_sb_t, sb_rsumino),	0 },
	{ offsetof(xfs_sb_t, sb_rextsize),	0 },
	{ offsetof(xfs_sb_t, sb_agblocks),	0 },
	{ offsetof(xfs_sb_t, sb_agcount),	0 },
	{ offsetof(xfs_sb_t, sb_rbmblocks),	0 },
	{ offsetof(xfs_sb_t, sb_logblocks),	0 },
	{ offsetof(xfs_sb_t, sb_versionnum),	0 },
	{ offsetof(xfs_sb_t, sb_sectsize),	0 },
	{ offsetof(xfs_sb_t, sb_inodesize),	0 },
	{ offsetof(xfs_sb_t, sb_inopblock),	0 },
	{ offsetof(xfs_sb_t, sb_fname[0]),	1 },
	{ offsetof(xfs_sb_t, sb_blocklog),	0 },
	{ offsetof(xfs_sb_t, sb_sectlog),	0 },
	{ offsetof(xfs_sb_t, sb_inodelog),	0 },
	{ offsetof(xfs_sb_t, sb_inopblog),	0 },
	{ offsetof(xfs_sb_t, sb_agblklog),	0 },
	{ offsetof(xfs_sb_t, sb_rextslog),	0 },
	{ offsetof(xfs_sb_t, sb_inprogress),	0 },
	{ offsetof(xfs_sb_t, sb_imax_pct),	0 },
	{ offsetof(xfs_sb_t, sb_icount),	0 },
	{ offsetof(xfs_sb_t, sb_ifree),		0 },
	{ offsetof(xfs_sb_t, sb_fdblocks),	0 },
	{ offsetof(xfs_sb_t, sb_frextents),	0 },
	{ offsetof(xfs_sb_t, sb_uquotino),	0 },
	{ offsetof(xfs_sb_t, sb_gquotino),	0 },
	{ offsetof(xfs_sb_t, sb_qflags),	0 },
	{ offsetof(xfs_sb_t, sb_flags),		0 },
	{ offsetof(xfs_sb_t, sb_shared_vn),	0 },
	{ offsetof(xfs_sb_t, sb_inoalignmt),	0 },
	{ offsetof(xfs_sb_t, sb_unit),		0 },
	{ offsetof(xfs_sb_t, sb_width),		0 },
	{ offsetof(xfs_sb_t, sb_dirblklog),	0 },
	{ offsetof(xfs_sb_t, sb_logsectlog),	0 },
	{ offsetof(xfs_sb_t, sb_logsectsize),	0 },
	{ offsetof(xfs_sb_t, sb_logsunit),	0 },
	{ offsetof(xfs_sb_t, sb_features2),	0 },
	{ offsetof(xfs_sb_t, sb_bad_features2),	0 },
	{ offsetof(xfs_sb_t, sb_features_compat),	0 },
	{ offsetof(xfs_sb_t, sb_features_ro_compat),	0 },
	{ offsetof(xfs_sb_t, sb_features_incompat),	0 },
	{ offsetof(xfs_sb_t, sb_features_log_incompat),	0 },
	{ offsetof(xfs_sb_t, sb_crc),		0 },
	{ offsetof(xfs_sb_t, sb_pad),		0 },
	{ offsetof(xfs_sb_t, sb_pquotino),	0 },
	{ offsetof(xfs_sb_t, sb_lsn),		0 },
	{ sizeof(xfs_sb_t),			0 }
};
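
/*
 * Note on the table above: the width of each field is implied by the offset
 * of the next entry (size = xfs_sb_info[f + 1].offset - xfs_sb_info[f].offset),
 * which is why the table is terminated by the { sizeof(xfs_sb_t), 0 } sentinel.
 * xfs_sb_to_disk() below walks the table exactly this way when byte-swapping
 * superblock fields out to disk.
 */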

static DEFINE_MUTEX(xfs_uuid_table_mutex);
static int xfs_uuid_table_size;
static uuid_t *xfs_uuid_table;

/*
 * See if the UUID is unique among mounted XFS filesystems.
 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
 */
STATIC int
xfs_uuid_mount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			hole, i;

	if (mp->m_flags & XFS_MOUNT_NOUUID)
		return 0;

	if (uuid_is_nil(uuid)) {
		xfs_warn(mp, "Filesystem has nil UUID - can't mount");
		return XFS_ERROR(EINVAL);
	}

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
		if (uuid_is_nil(&xfs_uuid_table[i])) {
			hole = i;
			continue;
		}
		if (uuid_equal(uuid, &xfs_uuid_table[i]))
			goto out_duplicate;
	}

	if (hole < 0) {
		xfs_uuid_table = kmem_realloc(xfs_uuid_table,
			(xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
			xfs_uuid_table_size * sizeof(*xfs_uuid_table),
			KM_SLEEP);
		hole = xfs_uuid_table_size++;
	}
	xfs_uuid_table[hole] = *uuid;
	mutex_unlock(&xfs_uuid_table_mutex);

	return 0;

out_duplicate:
	mutex_unlock(&xfs_uuid_table_mutex);
	xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid);
	return XFS_ERROR(EINVAL);
}

STATIC void
xfs_uuid_unmount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			i;

	if (mp->m_flags & XFS_MOUNT_NOUUID)
		return;

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0; i < xfs_uuid_table_size; i++) {
		if (uuid_is_nil(&xfs_uuid_table[i]))
			continue;
		if (!uuid_equal(uuid, &xfs_uuid_table[i]))
			continue;
		memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
		break;
	}
	ASSERT(i < xfs_uuid_table_size);
	mutex_unlock(&xfs_uuid_table_mutex);
}


/*
 * Reference counting access wrappers to the perag structures.
 * Because we never free per-ag structures, the only thing we
 * have to protect against changes is the tree structure itself.
 */
struct xfs_perag *
xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno)
{
	struct xfs_perag	*pag;
	int			ref = 0;

	rcu_read_lock();
	pag = radix_tree_lookup(&mp->m_perag_tree, agno);
	if (pag) {
		ASSERT(atomic_read(&pag->pag_ref) >= 0);
		ref = atomic_inc_return(&pag->pag_ref);
	}
	rcu_read_unlock();
	trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
	return pag;
}

/*
 * search from @first to find the next perag with the given tag set.
 */
struct xfs_perag *
xfs_perag_get_tag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		first,
	int			tag)
{
	struct xfs_perag	*pag;
	int			found;
	int			ref;

	rcu_read_lock();
	found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
					(void **)&pag, first, 1, tag);
	if (found <= 0) {
		rcu_read_unlock();
		return NULL;
	}
	ref = atomic_inc_return(&pag->pag_ref);
	rcu_read_unlock();
	trace_xfs_perag_get_tag(mp, pag->pag_agno, ref, _RET_IP_);
	return pag;
}

void
xfs_perag_put(struct xfs_perag *pag)
{
	int	ref;

	ASSERT(atomic_read(&pag->pag_ref) > 0);
	ref = atomic_dec_return(&pag->pag_ref);
	trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
}
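
/*
 * Minimal usage sketch of the wrappers above (illustrative only): look the
 * perag up, use it, then drop the reference, e.g.
 *
 *	pag = xfs_perag_get(mp, agno);
 *	if (pag) {
 *		... read or update per-ag state ...
 *		xfs_perag_put(pag);
 *	}
 *
 * xfs_initialize_perag_data() later in this file follows this pattern.
 */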

STATIC void
__xfs_free_perag(
	struct rcu_head	*head)
{
	struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);

	ASSERT(atomic_read(&pag->pag_ref) == 0);
	kmem_free(pag);
}

/*
 * Free up the per-ag resources associated with the mount structure.
 */
STATIC void
xfs_free_perag(
	xfs_mount_t	*mp)
{
	xfs_agnumber_t	agno;
	struct xfs_perag *pag;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		spin_lock(&mp->m_perag_lock);
		pag = radix_tree_delete(&mp->m_perag_tree, agno);
		spin_unlock(&mp->m_perag_lock);
		ASSERT(pag);
		ASSERT(atomic_read(&pag->pag_ref) == 0);
		call_rcu(&pag->rcu_head, __xfs_free_perag);
	}
}

/*
 * Check size of device based on the (data/realtime) block count.
 * Note: this check is used by the growfs code as well as mount.
 */
int
xfs_sb_validate_fsb_count(
	xfs_sb_t	*sbp,
	__uint64_t	nblocks)
{
	ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
	ASSERT(sbp->sb_blocklog >= BBSHIFT);

#if XFS_BIG_BLKNOS	/* Limited by ULONG_MAX of page cache index */
	if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
		return EFBIG;
#else			/* Limited by UINT_MAX of sectors */
	if (nblocks << (sbp->sb_blocklog - BBSHIFT) > UINT_MAX)
		return EFBIG;
#endif
	return 0;
}

/*
 * Check the validity of the SB found.
 */
STATIC int
xfs_mount_validate_sb(
	xfs_mount_t	*mp,
	xfs_sb_t	*sbp,
	bool		check_inprogress,
	bool		check_version)
{

	/*
	 * If the log device and data device have the
	 * same device number, the log is internal.
	 * Consequently, the sb_logstart should be non-zero.  If
	 * we have a zero sb_logstart in this case, we may be trying to mount
	 * a volume filesystem in a non-volume manner.
	 */
	if (sbp->sb_magicnum != XFS_SB_MAGIC) {
		xfs_warn(mp, "bad magic number");
		return XFS_ERROR(EWRONGFS);
	}

	if (!xfs_sb_good_version(sbp)) {
		xfs_warn(mp, "bad version");
		return XFS_ERROR(EWRONGFS);
	}

	if ((sbp->sb_qflags & (XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD)) &&
	    (sbp->sb_qflags & (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD |
				XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD))) {
		xfs_notice(mp,
"Super block has XFS_OQUOTA bits along with XFS_PQUOTA and/or XFS_GQUOTA bits.\n");
		return XFS_ERROR(EFSCORRUPTED);
	}

	/*
	 * Version 5 superblock feature mask validation. Reject combinations
	 * the kernel cannot support up front before checking anything else.
	 * For write validation, we don't need to check feature masks.
	 */
	if (check_version && XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) {
		xfs_alert(mp,
"Version 5 superblock detected. This kernel has EXPERIMENTAL support enabled!\n"
"Use of these features in this kernel is at your own risk!");

		if (xfs_sb_has_compat_feature(sbp,
					XFS_SB_FEAT_COMPAT_UNKNOWN)) {
			xfs_warn(mp,
"Superblock has unknown compatible features (0x%x) enabled.\n"
"Using a more recent kernel is recommended.",
				(sbp->sb_features_compat &
						XFS_SB_FEAT_COMPAT_UNKNOWN));
		}

		if (xfs_sb_has_ro_compat_feature(sbp,
					XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
			xfs_alert(mp,
"Superblock has unknown read-only compatible features (0x%x) enabled.",
				(sbp->sb_features_ro_compat &
						XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
			if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
				xfs_warn(mp,
"Attempted to mount read-only compatible filesystem read-write.\n"
"Filesystem can only be safely mounted read only.");
				return XFS_ERROR(EINVAL);
			}
		}
		if (xfs_sb_has_incompat_feature(sbp,
					XFS_SB_FEAT_INCOMPAT_UNKNOWN)) {
			xfs_warn(mp,
"Superblock has unknown incompatible features (0x%x) enabled.\n"
"Filesystem can not be safely mounted by this kernel.",
				(sbp->sb_features_incompat &
						XFS_SB_FEAT_INCOMPAT_UNKNOWN));
			return XFS_ERROR(EINVAL);
		}
	}

	if (unlikely(
	    sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) {
		xfs_warn(mp,
		"filesystem is marked as having an external log; "
		"specify logdev on the mount command line.");
		return XFS_ERROR(EINVAL);
	}

	if (unlikely(
	    sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) {
		xfs_warn(mp,
		"filesystem is marked as having an internal log; "
		"do not specify logdev on the mount command line.");
		return XFS_ERROR(EINVAL);
	}

	/*
	 * More sanity checking.  Most of these were stolen directly from
	 * xfs_repair.
	 */
	if (unlikely(
	    sbp->sb_agcount <= 0 ||
	    sbp->sb_sectsize < XFS_MIN_SECTORSIZE ||
	    sbp->sb_sectsize > XFS_MAX_SECTORSIZE ||
	    sbp->sb_sectlog < XFS_MIN_SECTORSIZE_LOG ||
	    sbp->sb_sectlog > XFS_MAX_SECTORSIZE_LOG ||
	    sbp->sb_sectsize != (1 << sbp->sb_sectlog) ||
	    sbp->sb_blocksize < XFS_MIN_BLOCKSIZE ||
	    sbp->sb_blocksize > XFS_MAX_BLOCKSIZE ||
	    sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG ||
	    sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
	    sbp->sb_blocksize != (1 << sbp->sb_blocklog) ||
	    sbp->sb_inodesize < XFS_DINODE_MIN_SIZE ||
	    sbp->sb_inodesize > XFS_DINODE_MAX_SIZE ||
	    sbp->sb_inodelog < XFS_DINODE_MIN_LOG ||
	    sbp->sb_inodelog > XFS_DINODE_MAX_LOG ||
	    sbp->sb_inodesize != (1 << sbp->sb_inodelog) ||
	    (sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog) ||
	    (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE) ||
	    (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) ||
	    (sbp->sb_imax_pct > 100 /* zero sb_imax_pct is valid */) ||
	    sbp->sb_dblocks == 0 ||
	    sbp->sb_dblocks > XFS_MAX_DBLOCKS(sbp) ||
	    sbp->sb_dblocks < XFS_MIN_DBLOCKS(sbp))) {
		XFS_CORRUPTION_ERROR("SB sanity check failed",
				XFS_ERRLEVEL_LOW, mp, sbp);
		return XFS_ERROR(EFSCORRUPTED);
	}

	/*
	 * Until this is fixed only page-sized or smaller data blocks work.
	 */
	if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) {
		xfs_warn(mp,
		"File system with blocksize %d bytes. "
		"Only pagesize (%ld) or less will currently work.",
				sbp->sb_blocksize, PAGE_SIZE);
		return XFS_ERROR(ENOSYS);
	}

	/*
	 * Currently only very few inode sizes are supported.
	 */
	switch (sbp->sb_inodesize) {
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	default:
		xfs_warn(mp, "inode size of %d bytes not supported",
				sbp->sb_inodesize);
		return XFS_ERROR(ENOSYS);
	}

	if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) ||
	    xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) {
		xfs_warn(mp,
		"file system too large to be mounted on this system.");
		return XFS_ERROR(EFBIG);
	}

	if (check_inprogress && sbp->sb_inprogress) {
		xfs_warn(mp, "Offline file system operation in progress!");
		return XFS_ERROR(EFSCORRUPTED);
	}

	/*
	 * Version 1 directory format has never worked on Linux.
	 */
	if (unlikely(!xfs_sb_version_hasdirv2(sbp))) {
		xfs_warn(mp, "file system using version 1 directory format");
		return XFS_ERROR(ENOSYS);
	}

	return 0;
}

int
xfs_initialize_perag(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agcount,
	xfs_agnumber_t	*maxagi)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	first_initialised = 0;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;
	xfs_ino_t	ino;
	xfs_sb_t	*sbp = &mp->m_sb;
	int		error = -ENOMEM;

	/*
	 * Walk the current per-ag tree so we don't try to initialise AGs
	 * that already exist (growfs case). Allocate and insert all the
	 * AGs we don't find ready for initialisation.
	 */
	for (index = 0; index < agcount; index++) {
		pag = xfs_perag_get(mp, index);
		if (pag) {
			xfs_perag_put(pag);
			continue;
		}
		if (!first_initialised)
			first_initialised = index;

		pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
		if (!pag)
			goto out_unwind;
		pag->pag_agno = index;
		pag->pag_mount = mp;
		spin_lock_init(&pag->pag_ici_lock);
		mutex_init(&pag->pag_ici_reclaim_lock);
		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
		spin_lock_init(&pag->pag_buf_lock);
		pag->pag_buf_tree = RB_ROOT;

		if (radix_tree_preload(GFP_NOFS))
			goto out_unwind;

		spin_lock(&mp->m_perag_lock);
		if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
			BUG();
			spin_unlock(&mp->m_perag_lock);
			radix_tree_preload_end();
			error = -EEXIST;
			goto out_unwind;
		}
		spin_unlock(&mp->m_perag_lock);
		radix_tree_preload_end();
	}

	/*
	 * If we mount with the inode64 option, or if no inode overflows
	 * the legacy 32-bit address space, clear the inode32 option.
	 */
	agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
		mp->m_flags |= XFS_MOUNT_32BITINODES;
	else
		mp->m_flags &= ~XFS_MOUNT_32BITINODES;

	if (mp->m_flags & XFS_MOUNT_32BITINODES)
		index = xfs_set_inode32(mp);
	else
		index = xfs_set_inode64(mp);

	if (maxagi)
		*maxagi = index;
	return 0;

out_unwind:
	kmem_free(pag);
	for (; index > first_initialised; index--) {
		pag = radix_tree_delete(&mp->m_perag_tree, index);
		kmem_free(pag);
	}
	return error;
}

static void
xfs_sb_quota_from_disk(struct xfs_sb *sbp)
{
	/*
	 * Older mkfs does not initialize quota inodes to NULLFSINO, which
	 * leaves the in-core superblock with two different values meaning
	 * "no quota inode": 0 and NULLFSINO.  Normalize both to the single
	 * value NULLFSINO.
	 *
	 * Note that this change affects only the in-core values.  These
	 * values are not written back to disk unless any quota information
	 * is written to the disk.  Even in that case, the sb_pquotino field
	 * is not written to disk unless the superblock supports pquotino.
	 */
	if (sbp->sb_uquotino == 0)
		sbp->sb_uquotino = NULLFSINO;
	if (sbp->sb_gquotino == 0)
		sbp->sb_gquotino = NULLFSINO;
	if (sbp->sb_pquotino == 0)
		sbp->sb_pquotino = NULLFSINO;

	if (sbp->sb_qflags & XFS_OQUOTA_ENFD)
		sbp->sb_qflags |= (sbp->sb_qflags & XFS_PQUOTA_ACCT) ?
					XFS_PQUOTA_ENFD : XFS_GQUOTA_ENFD;
	if (sbp->sb_qflags & XFS_OQUOTA_CHKD)
		sbp->sb_qflags |= (sbp->sb_qflags & XFS_PQUOTA_ACCT) ?
					XFS_PQUOTA_CHKD : XFS_GQUOTA_CHKD;
	sbp->sb_qflags &= ~(XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD);
}
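
/*
 * For example, an on-disk sb_qflags of (XFS_PQUOTA_ACCT | XFS_OQUOTA_ENFD)
 * comes out of the conversion above as (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD)
 * in core, with the XFS_OQUOTA_* bits cleared; xfs_sb_quota_to_disk() below
 * performs the inverse mapping when the superblock is written back.
 */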

void
xfs_sb_from_disk(
	struct xfs_sb	*to,
	xfs_dsb_t	*from)
{
	to->sb_magicnum = be32_to_cpu(from->sb_magicnum);
	to->sb_blocksize = be32_to_cpu(from->sb_blocksize);
	to->sb_dblocks = be64_to_cpu(from->sb_dblocks);
	to->sb_rblocks = be64_to_cpu(from->sb_rblocks);
	to->sb_rextents = be64_to_cpu(from->sb_rextents);
	memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid));
	to->sb_logstart = be64_to_cpu(from->sb_logstart);
	to->sb_rootino = be64_to_cpu(from->sb_rootino);
	to->sb_rbmino = be64_to_cpu(from->sb_rbmino);
	to->sb_rsumino = be64_to_cpu(from->sb_rsumino);
	to->sb_rextsize = be32_to_cpu(from->sb_rextsize);
	to->sb_agblocks = be32_to_cpu(from->sb_agblocks);
	to->sb_agcount = be32_to_cpu(from->sb_agcount);
	to->sb_rbmblocks = be32_to_cpu(from->sb_rbmblocks);
	to->sb_logblocks = be32_to_cpu(from->sb_logblocks);
	to->sb_versionnum = be16_to_cpu(from->sb_versionnum);
	to->sb_sectsize = be16_to_cpu(from->sb_sectsize);
	to->sb_inodesize = be16_to_cpu(from->sb_inodesize);
	to->sb_inopblock = be16_to_cpu(from->sb_inopblock);
	memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname));
	to->sb_blocklog = from->sb_blocklog;
	to->sb_sectlog = from->sb_sectlog;
	to->sb_inodelog = from->sb_inodelog;
	to->sb_inopblog = from->sb_inopblog;
	to->sb_agblklog = from->sb_agblklog;
	to->sb_rextslog = from->sb_rextslog;
	to->sb_inprogress = from->sb_inprogress;
	to->sb_imax_pct = from->sb_imax_pct;
	to->sb_icount = be64_to_cpu(from->sb_icount);
	to->sb_ifree = be64_to_cpu(from->sb_ifree);
	to->sb_fdblocks = be64_to_cpu(from->sb_fdblocks);
	to->sb_frextents = be64_to_cpu(from->sb_frextents);
	to->sb_uquotino = be64_to_cpu(from->sb_uquotino);
	to->sb_gquotino = be64_to_cpu(from->sb_gquotino);
	to->sb_qflags = be16_to_cpu(from->sb_qflags);
	to->sb_flags = from->sb_flags;
	to->sb_shared_vn = from->sb_shared_vn;
	to->sb_inoalignmt = be32_to_cpu(from->sb_inoalignmt);
	to->sb_unit = be32_to_cpu(from->sb_unit);
	to->sb_width = be32_to_cpu(from->sb_width);
	to->sb_dirblklog = from->sb_dirblklog;
	to->sb_logsectlog = from->sb_logsectlog;
	to->sb_logsectsize = be16_to_cpu(from->sb_logsectsize);
	to->sb_logsunit = be32_to_cpu(from->sb_logsunit);
	to->sb_features2 = be32_to_cpu(from->sb_features2);
	to->sb_bad_features2 = be32_to_cpu(from->sb_bad_features2);
	to->sb_features_compat = be32_to_cpu(from->sb_features_compat);
	to->sb_features_ro_compat = be32_to_cpu(from->sb_features_ro_compat);
	to->sb_features_incompat = be32_to_cpu(from->sb_features_incompat);
	to->sb_features_log_incompat =
				be32_to_cpu(from->sb_features_log_incompat);
	to->sb_pad = 0;
	to->sb_pquotino = be64_to_cpu(from->sb_pquotino);
	to->sb_lsn = be64_to_cpu(from->sb_lsn);
}

static inline void
xfs_sb_quota_to_disk(
	xfs_dsb_t	*to,
	xfs_sb_t	*from,
	__int64_t	*fields)
{
	__uint16_t	qflags = from->sb_qflags;

	if (*fields & XFS_SB_QFLAGS) {
		/*
		 * The in-core version of sb_qflags does not have
		 * XFS_OQUOTA_* flags, whereas the on-disk version
		 * does.  So, convert the in-core XFS_{PG}QUOTA_* flags
		 * to on-disk XFS_OQUOTA_* flags.
		 */
		qflags &= ~(XFS_PQUOTA_ENFD | XFS_PQUOTA_CHKD |
				XFS_GQUOTA_ENFD | XFS_GQUOTA_CHKD);

		if (from->sb_qflags &
				(XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD))
			qflags |= XFS_OQUOTA_ENFD;
		if (from->sb_qflags &
				(XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD))
			qflags |= XFS_OQUOTA_CHKD;
		to->sb_qflags = cpu_to_be16(qflags);
		*fields &= ~XFS_SB_QFLAGS;
	}
}

/*
 * Copy the in-core superblock to the on-disk one.
 *
 * The fields argument is a mask of superblock fields to copy.
 */
void
xfs_sb_to_disk(
	xfs_dsb_t	*to,
	xfs_sb_t	*from,
	__int64_t	fields)
{
	xfs_caddr_t	to_ptr = (xfs_caddr_t)to;
	xfs_caddr_t	from_ptr = (xfs_caddr_t)from;
	xfs_sb_field_t	f;
	int		first;
	int		size;

	ASSERT(fields);
	if (!fields)
		return;

	xfs_sb_quota_to_disk(to, from, &fields);
	while (fields) {
		f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
		first = xfs_sb_info[f].offset;
		size = xfs_sb_info[f + 1].offset - first;

		ASSERT(xfs_sb_info[f].type == 0 || xfs_sb_info[f].type == 1);

		if (size == 1 || xfs_sb_info[f].type == 1) {
			memcpy(to_ptr + first, from_ptr + first, size);
		} else {
			switch (size) {
			case 2:
				*(__be16 *)(to_ptr + first) =
					cpu_to_be16(*(__u16 *)(from_ptr + first));
				break;
			case 4:
				*(__be32 *)(to_ptr + first) =
					cpu_to_be32(*(__u32 *)(from_ptr + first));
				break;
			case 8:
				*(__be64 *)(to_ptr + first) =
					cpu_to_be64(*(__u64 *)(from_ptr + first));
				break;
			default:
				ASSERT(0);
			}
		}

		fields &= ~(1LL << f);
	}
}

static int
xfs_sb_verify(
	struct xfs_buf	*bp,
	bool		check_version)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	struct xfs_sb	sb;

	xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp));

	/*
	 * Only check the in progress field for the primary superblock as
	 * mkfs.xfs doesn't clear it from secondary superblocks.
	 */
	return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR,
				     check_version);
}

/*
 * If the superblock has the CRC feature bit set or the CRC field is non-null,
 * check that the CRC is valid.  We check the CRC field is non-null because a
 * single bit error could clear the feature bit and unused parts of the
 * superblock are supposed to be zero. Hence a non-null crc field indicates that
 * we've potentially lost a feature bit and we should check it anyway.
 */
static void
xfs_sb_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	struct xfs_dsb	*dsb = XFS_BUF_TO_SBP(bp);
	int		error;

	/*
	 * open code the version check to avoid needing to convert the entire
	 * superblock from disk order just to check the version number
	 */
	if (dsb->sb_magicnum == cpu_to_be32(XFS_SB_MAGIC) &&
	    (((be16_to_cpu(dsb->sb_versionnum) & XFS_SB_VERSION_NUMBITS) ==
						XFS_SB_VERSION_5) ||
	     dsb->sb_crc != 0)) {

		if (!xfs_verify_cksum(bp->b_addr, be16_to_cpu(dsb->sb_sectsize),
				      offsetof(struct xfs_sb, sb_crc))) {
			error = EFSCORRUPTED;
			goto out_error;
		}
	}
	error = xfs_sb_verify(bp, true);

out_error:
	if (error) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
		xfs_buf_ioerror(bp, error);
	}
}

/*
 * We may be probed for a filesystem match, so we may not want to emit
 * messages when the superblock buffer is not actually an XFS superblock.
 * If we find an XFS superblock, then run a normal, noisy mount because we
 * are really going to mount it and want to know about errors.
 */
static void
xfs_sb_quiet_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_dsb	*dsb = XFS_BUF_TO_SBP(bp);

	if (dsb->sb_magicnum == cpu_to_be32(XFS_SB_MAGIC)) {
		/* XFS filesystem, verify noisily! */
		xfs_sb_read_verify(bp);
		return;
	}
	/* quietly fail */
	xfs_buf_ioerror(bp, EWRONGFS);
}

static void
xfs_sb_write_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_fspriv;
	int			error;

	error = xfs_sb_verify(bp, false);
	if (error) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
		xfs_buf_ioerror(bp, error);
		return;
	}

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (bip)
		XFS_BUF_TO_SBP(bp)->sb_lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 offsetof(struct xfs_sb, sb_crc));
}

const struct xfs_buf_ops xfs_sb_buf_ops = {
	.verify_read = xfs_sb_read_verify,
	.verify_write = xfs_sb_write_verify,
};

static const struct xfs_buf_ops xfs_sb_quiet_buf_ops = {
	.verify_read = xfs_sb_quiet_read_verify,
	.verify_write = xfs_sb_write_verify,
};
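
/*
 * xfs_readsb() below selects between the two sets of buffer ops above:
 * the quiet ops are used when the caller passed XFS_MFSI_QUIET (e.g. when
 * probing for a filesystem match), the normal, noisy ops otherwise.
 */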

/*
 * xfs_readsb
 *
 * Does the initial read of the superblock.
 */
int
xfs_readsb(xfs_mount_t *mp, int flags)
{
	unsigned int	sector_size;
	struct xfs_buf	*bp;
	struct xfs_sb	*sbp = &mp->m_sb;
	int		error;
	int		loud = !(flags & XFS_MFSI_QUIET);

	ASSERT(mp->m_sb_bp == NULL);
	ASSERT(mp->m_ddev_targp != NULL);

	/*
	 * Allocate a (locked) buffer to hold the superblock.
	 * This will be kept around at all times to optimize
	 * access to the superblock.
	 */
	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);

reread:
	bp = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
				   BTOBB(sector_size), 0,
				   loud ? &xfs_sb_buf_ops
					: &xfs_sb_quiet_buf_ops);
	if (!bp) {
		if (loud)
			xfs_warn(mp, "SB buffer read failed");
		return EIO;
	}
	if (bp->b_error) {
		error = bp->b_error;
		if (loud)
			xfs_warn(mp, "SB validate failed with error %d.", error);
		goto release_buf;
	}

	/*
	 * Initialize the mount structure from the superblock.
	 */
	xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp));

	xfs_sb_quota_from_disk(&mp->m_sb);
	/*
	 * We must be able to do sector-sized and sector-aligned IO.
	 */
	if (sector_size > sbp->sb_sectsize) {
		if (loud)
			xfs_warn(mp, "device supports %u byte sectors (not %u)",
				sector_size, sbp->sb_sectsize);
		error = ENOSYS;
		goto release_buf;
	}

	/*
	 * If device sector size is smaller than the superblock size,
	 * re-read the superblock so the buffer is correctly sized.
	 */
	if (sector_size < sbp->sb_sectsize) {
		xfs_buf_relse(bp);
		sector_size = sbp->sb_sectsize;
		goto reread;
	}

	/* Initialize per-cpu counters */
	xfs_icsb_reinit_counters(mp);

	/* no need to be quiet anymore, so reset the buf ops */
	bp->b_ops = &xfs_sb_buf_ops;

	mp->m_sb_bp = bp;
	xfs_buf_unlock(bp);
	return 0;

release_buf:
	xfs_buf_relse(bp);
	return error;
}

| 944 | /* |
| 945 | * xfs_mount_common |
| 946 | * |
| 947 | * Mount initialization code establishing various mount |
| 948 | * fields from the superblock associated with the given |
| 949 | * mount structure |
| 950 | */ |
Christoph Hellwig | ba0f32d | 2005-06-21 15:36:52 +1000 | [diff] [blame] | 951 | STATIC void |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 952 | xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp) |
| 953 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 954 | mp->m_agfrotor = mp->m_agirotor = 0; |
Eric Sandeen | 007c61c | 2007-10-11 17:43:56 +1000 | [diff] [blame] | 955 | spin_lock_init(&mp->m_agirotor_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 956 | mp->m_maxagi = mp->m_sb.sb_agcount; |
| 957 | mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG; |
| 958 | mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT; |
| 959 | mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT; |
| 960 | mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1; |
| 961 | mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 962 | mp->m_blockmask = sbp->sb_blocksize - 1; |
| 963 | mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG; |
| 964 | mp->m_blockwmask = mp->m_blockwsize - 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 965 | |
Christoph Hellwig | 60197e8 | 2008-10-30 17:11:19 +1100 | [diff] [blame] | 966 | mp->m_alloc_mxr[0] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 1); |
| 967 | mp->m_alloc_mxr[1] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 0); |
| 968 | mp->m_alloc_mnr[0] = mp->m_alloc_mxr[0] / 2; |
| 969 | mp->m_alloc_mnr[1] = mp->m_alloc_mxr[1] / 2; |
| 970 | |
| 971 | mp->m_inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1); |
| 972 | mp->m_inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0); |
| 973 | mp->m_inobt_mnr[0] = mp->m_inobt_mxr[0] / 2; |
| 974 | mp->m_inobt_mnr[1] = mp->m_inobt_mxr[1] / 2; |
| 975 | |
| 976 | mp->m_bmap_dmxr[0] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 1); |
| 977 | mp->m_bmap_dmxr[1] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 0); |
| 978 | mp->m_bmap_dmnr[0] = mp->m_bmap_dmxr[0] / 2; |
| 979 | mp->m_bmap_dmnr[1] = mp->m_bmap_dmxr[1] / 2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 980 | |
| 981 | mp->m_bsize = XFS_FSB_TO_BB(mp, 1); |
| 982 | mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK, |
| 983 | sbp->sb_inopblock); |
| 984 | mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog; |
| 985 | } |
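/*
 * Illustrative worked example, assuming the usual definitions
 * XFS_NBBYLOG == 3, BBSHIFT == 9 and XFS_INODES_PER_CHUNK == 64, and a
 * common geometry of 4 KiB blocks, 512-byte sectors and 256-byte inodes
 * (sb_blocklog = 12, sb_sectlog = 9, sb_inopblock = 16, sb_inopblog = 4):
 *
 *	m_blkbit_log  = 12 + 3 = 15		(32768 bits per block)
 *	m_blkbb_log   = 12 - 9 = 3		(8 basic blocks per block)
 *	m_sectbb_log  =  9 - 9 = 0		(1 basic block per sector)
 *	m_bsize       = XFS_FSB_TO_BB(mp, 1) = 8
 *	m_ialloc_inos = MAX(64, 16) = 64	(inodes per allocation chunk)
 *	m_ialloc_blks = 64 >> 4 = 4		(blocks per inode chunk)
 */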
David Chinner | 92821e2 | 2007-05-24 15:26:31 +1000 | [diff] [blame] | 986 | |
| 987 | /* |
| 988 | * xfs_initialize_perag_data |
| 989 | * |
| 990 | * Read in each per-ag structure so we can count up the number of |
| 991 | * allocated inodes, free inodes and used filesystem blocks as this |
| 992 | * information is no longer persistent in the superblock. Once we have |
| 993 | * this information, write it into the in-core superblock structure. |
| 994 | */ |
| 995 | STATIC int |
| 996 | xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount) |
| 997 | { |
| 998 | xfs_agnumber_t index; |
| 999 | xfs_perag_t *pag; |
| 1000 | xfs_sb_t *sbp = &mp->m_sb; |
| 1001 | uint64_t ifree = 0; |
| 1002 | uint64_t ialloc = 0; |
| 1003 | uint64_t bfree = 0; |
| 1004 | uint64_t bfreelst = 0; |
| 1005 | uint64_t btree = 0; |
| 1006 | int error; |
David Chinner | 92821e2 | 2007-05-24 15:26:31 +1000 | [diff] [blame] | 1007 | |
| 1008 | for (index = 0; index < agcount; index++) { |
| 1009 | /* |
| 1010 | * read the agf, then the agi. This gets us |
Malcolm Parsons | 9da096f | 2009-03-29 09:55:42 +0200 | [diff] [blame] | 1011 | * all the information we need and populates the |
David Chinner | 92821e2 | 2007-05-24 15:26:31 +1000 | [diff] [blame] | 1012 | * per-ag structures for us. |
| 1013 | */ |
| 1014 | error = xfs_alloc_pagf_init(mp, NULL, index, 0); |
| 1015 | if (error) |
| 1016 | return error; |
| 1017 | |
| 1018 | error = xfs_ialloc_pagi_init(mp, NULL, index); |
| 1019 | if (error) |
| 1020 | return error; |
Dave Chinner | 44b56e0 | 2010-01-11 11:47:43 +0000 | [diff] [blame] | 1021 | pag = xfs_perag_get(mp, index); |
David Chinner | 92821e2 | 2007-05-24 15:26:31 +1000 | [diff] [blame] | 1022 | ifree += pag->pagi_freecount; |
| 1023 | ialloc += pag->pagi_count; |
| 1024 | bfree += pag->pagf_freeblks; |
| 1025 | bfreelst += pag->pagf_flcount; |
| 1026 | btree += pag->pagf_btreeblks; |
Dave Chinner | 44b56e0 | 2010-01-11 11:47:43 +0000 | [diff] [blame] | 1027 | xfs_perag_put(pag); |
David Chinner | 92821e2 | 2007-05-24 15:26:31 +1000 | [diff] [blame] | 1028 | } |
| 1029 | /* |
| 1030 | * Overwrite incore superblock counters with just-read data |
| 1031 | */ |
Eric Sandeen | 3685c2a | 2007-10-11 17:42:32 +1000 | [diff] [blame] | 1032 | spin_lock(&mp->m_sb_lock); |
David Chinner | 92821e2 | 2007-05-24 15:26:31 +1000 | [diff] [blame] | 1033 | sbp->sb_ifree = ifree; |
| 1034 | sbp->sb_icount = ialloc; |
| 1035 | sbp->sb_fdblocks = bfree + bfreelst + btree; |
Eric Sandeen | 3685c2a | 2007-10-11 17:42:32 +1000 | [diff] [blame] | 1036 | spin_unlock(&mp->m_sb_lock); |
David Chinner | 92821e2 | 2007-05-24 15:26:31 +1000 | [diff] [blame] | 1037 | |
| 1038 | /* Fixup the per-cpu counters as well. */ |
| 1039 | xfs_icsb_reinit_counters(mp); |
| 1040 | |
| 1041 | return 0; |
| 1042 | } |
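/*
 * Note that the recomputed sb_fdblocks is the sum of the blocks in free
 * extents (pagf_freeblks), the blocks on the AG free lists (pagf_flcount)
 * and the blocks currently held in the AGF btrees (pagf_btreeblks), the
 * last of which lazy superblock counting treats as free space.
 */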
| 1043 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1044 | /* |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1045 | * Update alignment values based on mount options and sb values |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1046 | */ |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1047 | STATIC int |
Christoph Hellwig | 7884bc8 | 2009-01-19 02:04:07 +0100 | [diff] [blame] | 1048 | xfs_update_alignment(xfs_mount_t *mp) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1049 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1050 | xfs_sb_t *sbp = &(mp->m_sb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1051 | |
Christoph Hellwig | 4249023 | 2008-08-13 16:49:32 +1000 | [diff] [blame] | 1052 | if (mp->m_dalign) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1053 | /* |
| 1054 | * If stripe unit and stripe width are not multiples |
| 1055 |  * of the fs blocksize, turn off alignment. |
| 1056 | */ |
| 1057 | if ((BBTOB(mp->m_dalign) & mp->m_blockmask) || |
| 1058 | (BBTOB(mp->m_swidth) & mp->m_blockmask)) { |
Jie Liu | 39a45d8 | 2013-05-02 19:27:47 +0800 | [diff] [blame] | 1059 | xfs_warn(mp, |
| 1060 | "alignment check failed: sunit/swidth vs. blocksize(%d)", |
| 1061 | sbp->sb_blocksize); |
| 1062 | return XFS_ERROR(EINVAL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1063 | } else { |
| 1064 | /* |
| 1065 | * Convert the stripe unit and width to FSBs. |
| 1066 | */ |
| 1067 | mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign); |
| 1068 | if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) { |
Dave Chinner | 5348778 | 2011-03-07 10:05:35 +1100 | [diff] [blame] | 1069 | xfs_warn(mp, |
Jie Liu | 39a45d8 | 2013-05-02 19:27:47 +0800 | [diff] [blame] | 1070 | "alignment check failed: sunit/swidth vs. agsize(%d)", |
| 1071 | sbp->sb_agblocks); |
| 1072 | return XFS_ERROR(EINVAL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1073 | } else if (mp->m_dalign) { |
| 1074 | mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth); |
| 1075 | } else { |
Jie Liu | 39a45d8 | 2013-05-02 19:27:47 +0800 | [diff] [blame] | 1076 | xfs_warn(mp, |
| 1077 | "alignment check failed: sunit(%d) less than bsize(%d)", |
| 1078 | mp->m_dalign, sbp->sb_blocksize); |
| 1079 | return XFS_ERROR(EINVAL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1080 | } |
| 1081 | } |
| 1082 | |
| 1083 | /* |
| 1084 | * Update superblock with new values |
| 1085 | * and log changes |
| 1086 | */ |
Eric Sandeen | 6211870 | 2008-03-06 13:44:28 +1100 | [diff] [blame] | 1087 | if (xfs_sb_version_hasdalign(sbp)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1088 | if (sbp->sb_unit != mp->m_dalign) { |
| 1089 | sbp->sb_unit = mp->m_dalign; |
Christoph Hellwig | 7884bc8 | 2009-01-19 02:04:07 +0100 | [diff] [blame] | 1090 | mp->m_update_flags |= XFS_SB_UNIT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1091 | } |
| 1092 | if (sbp->sb_width != mp->m_swidth) { |
| 1093 | sbp->sb_width = mp->m_swidth; |
Christoph Hellwig | 7884bc8 | 2009-01-19 02:04:07 +0100 | [diff] [blame] | 1094 | mp->m_update_flags |= XFS_SB_WIDTH; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1095 | } |
Jie Liu | 34d7f60 | 2013-05-02 19:27:53 +0800 | [diff] [blame] | 1096 | } else { |
| 1097 | xfs_warn(mp, |
| 1098 | "cannot change alignment: superblock does not support data alignment"); |
| 1099 | return XFS_ERROR(EINVAL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1100 | } |
| 1101 | } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN && |
Eric Sandeen | 6211870 | 2008-03-06 13:44:28 +1100 | [diff] [blame] | 1102 | xfs_sb_version_hasdalign(&mp->m_sb)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1103 | mp->m_dalign = sbp->sb_unit; |
| 1104 | mp->m_swidth = sbp->sb_width; |
| 1105 | } |
| 1106 | |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1107 | return 0; |
| 1108 | } |
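/*
 * Illustrative example: the sunit/swidth mount options are specified in
 * 512-byte basic blocks, so sunit=128,swidth=512 describes a 64 KiB
 * stripe unit and a 256 KiB stripe width. With 4 KiB filesystem blocks,
 * XFS_BB_TO_FSBT() turns these into m_dalign = 16 and m_swidth = 64
 * filesystem blocks, which is what the allocator and the sb_unit/sb_width
 * superblock fields use from here on.
 */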
| 1109 | |
| 1110 | /* |
| 1111 | * Set the maximum inode count for this filesystem |
| 1112 | */ |
| 1113 | STATIC void |
| 1114 | xfs_set_maxicount(xfs_mount_t *mp) |
| 1115 | { |
| 1116 | xfs_sb_t *sbp = &(mp->m_sb); |
| 1117 | __uint64_t icount; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1118 | |
| 1119 | if (sbp->sb_imax_pct) { |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1120 | /* |
| 1121 | * Make sure the maximum inode count is a multiple |
| 1122 | * of the units we allocate inodes in. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1123 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1124 | icount = sbp->sb_dblocks * sbp->sb_imax_pct; |
| 1125 | do_div(icount, 100); |
| 1126 | do_div(icount, mp->m_ialloc_blks); |
| 1127 | mp->m_maxicount = (icount * mp->m_ialloc_blks) << |
| 1128 | sbp->sb_inopblog; |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1129 | } else { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1130 | mp->m_maxicount = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1131 | } |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1132 | } |
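/*
 * Illustrative example: with sb_imax_pct = 25, sb_dblocks = 1,000,000,
 * m_ialloc_blks = 4 and sb_inopblog = 4 (16 inodes per block),
 * icount = 1,000,000 * 25 / 100 = 250,000 blocks, which is then rounded
 * down to a whole number of 4-block inode chunks (62,500 chunks) and
 * converted to inodes: m_maxicount = 250,000 << 4 = 4,000,000 inodes.
 */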
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1133 | |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1134 | /* |
| 1135 | * Set the default minimum read and write sizes unless |
| 1136 | * already specified in a mount option. |
| 1137 | * We use smaller I/O sizes when the file system |
| 1138 | * is being used for NFS service (wsync mount option). |
| 1139 | */ |
| 1140 | STATIC void |
| 1141 | xfs_set_rw_sizes(xfs_mount_t *mp) |
| 1142 | { |
| 1143 | xfs_sb_t *sbp = &(mp->m_sb); |
| 1144 | int readio_log, writeio_log; |
| 1145 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1146 | if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) { |
| 1147 | if (mp->m_flags & XFS_MOUNT_WSYNC) { |
| 1148 | readio_log = XFS_WSYNC_READIO_LOG; |
| 1149 | writeio_log = XFS_WSYNC_WRITEIO_LOG; |
| 1150 | } else { |
| 1151 | readio_log = XFS_READIO_LOG_LARGE; |
| 1152 | writeio_log = XFS_WRITEIO_LOG_LARGE; |
| 1153 | } |
| 1154 | } else { |
| 1155 | readio_log = mp->m_readio_log; |
| 1156 | writeio_log = mp->m_writeio_log; |
| 1157 | } |
| 1158 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1159 | if (sbp->sb_blocklog > readio_log) { |
| 1160 | mp->m_readio_log = sbp->sb_blocklog; |
| 1161 | } else { |
| 1162 | mp->m_readio_log = readio_log; |
| 1163 | } |
| 1164 | mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog); |
| 1165 | if (sbp->sb_blocklog > writeio_log) { |
| 1166 | mp->m_writeio_log = sbp->sb_blocklog; |
| 1167 | } else { |
| 1168 | mp->m_writeio_log = writeio_log; |
| 1169 | } |
| 1170 | mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog); |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1171 | } |
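/*
 * Illustrative example: if the defaults above come out as
 * writeio_log = 16 (64 KiB) on a filesystem with 4 KiB blocks
 * (sb_blocklog = 12), then m_writeio_blocks = 1 << (16 - 12) = 16 blocks.
 * The comparison against sb_blocklog simply ensures the preferred I/O
 * size is never smaller than a single filesystem block.
 */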
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1172 | |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1173 | /* |
Dave Chinner | 055388a | 2011-01-04 11:35:03 +1100 | [diff] [blame] | 1174 |  * Precalculate the low space thresholds for dynamic speculative preallocation. |
| 1175 | */ |
| 1176 | void |
| 1177 | xfs_set_low_space_thresholds( |
| 1178 | struct xfs_mount *mp) |
| 1179 | { |
| 1180 | int i; |
| 1181 | |
| 1182 | for (i = 0; i < XFS_LOWSP_MAX; i++) { |
| 1183 | __uint64_t space = mp->m_sb.sb_dblocks; |
| 1184 | |
| 1185 | do_div(space, 100); |
| 1186 | mp->m_low_space[i] = space * (i + 1); |
| 1187 | } |
| 1188 | } |
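/*
 * The thresholds end up at 1%, 2%, ... of sb_dblocks. For example, a
 * filesystem with sb_dblocks = 268,435,456 (1 TiB of 4 KiB blocks) gets
 * m_low_space[0] = 2,684,354 blocks (1%), m_low_space[1] = 5,368,708
 * blocks (2%), and so on; these levels are used to progressively throttle
 * speculative preallocation as free space runs low.
 */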
| 1189 | |
| 1190 | |
| 1191 | /* |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1192 | * Set whether we're using inode alignment. |
| 1193 | */ |
| 1194 | STATIC void |
| 1195 | xfs_set_inoalignment(xfs_mount_t *mp) |
| 1196 | { |
Eric Sandeen | 6211870 | 2008-03-06 13:44:28 +1100 | [diff] [blame] | 1197 | if (xfs_sb_version_hasalign(&mp->m_sb) && |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1198 | mp->m_sb.sb_inoalignmt >= |
| 1199 | XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size)) |
| 1200 | mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1; |
| 1201 | else |
| 1202 | mp->m_inoalign_mask = 0; |
| 1203 | /* |
| 1204 | * If we are using stripe alignment, check whether |
| 1205 | * the stripe unit is a multiple of the inode alignment |
| 1206 | */ |
| 1207 | if (mp->m_dalign && mp->m_inoalign_mask && |
| 1208 | !(mp->m_dalign & mp->m_inoalign_mask)) |
| 1209 | mp->m_sinoalign = mp->m_dalign; |
| 1210 | else |
| 1211 | mp->m_sinoalign = 0; |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1212 | } |
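/*
 * Illustrative example: with 4 KiB blocks and an 8 KiB inode cluster,
 * XFS_B_TO_FSBT(mp, m_inode_cluster_size) = 2, so any sb_inoalignmt >= 2
 * enables inode alignment with m_inoalign_mask = sb_inoalignmt - 1. If
 * stripe alignment is also in use and m_dalign is a multiple of the inode
 * alignment, m_sinoalign is set so inode chunks can be allocated on
 * stripe unit boundaries as well.
 */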
| 1213 | |
| 1214 | /* |
| 1215 | * Check that the data (and log if separate) are an ok size. |
| 1216 | */ |
| 1217 | STATIC int |
Christoph Hellwig | 4249023 | 2008-08-13 16:49:32 +1000 | [diff] [blame] | 1218 | xfs_check_sizes(xfs_mount_t *mp) |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1219 | { |
| 1220 | xfs_buf_t *bp; |
| 1221 | xfs_daddr_t d; |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1222 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1223 | d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks); |
| 1224 | if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) { |
Dave Chinner | 0b932cc | 2011-03-07 10:08:35 +1100 | [diff] [blame] | 1225 | xfs_warn(mp, "filesystem size mismatch detected"); |
Eric Sandeen | 657a4cf | 2010-04-30 03:42:49 +0000 | [diff] [blame] | 1226 | return XFS_ERROR(EFBIG); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1227 | } |
Dave Chinner | e70b73f | 2012-04-23 15:58:49 +1000 | [diff] [blame] | 1228 | bp = xfs_buf_read_uncached(mp->m_ddev_targp, |
Dave Chinner | 1922c94 | 2010-09-22 10:47:20 +1000 | [diff] [blame] | 1229 | d - XFS_FSS_TO_BB(mp, 1), |
Dave Chinner | c3f8fc7 | 2012-11-12 22:54:01 +1100 | [diff] [blame] | 1230 | XFS_FSS_TO_BB(mp, 1), 0, NULL); |
Dave Chinner | 1922c94 | 2010-09-22 10:47:20 +1000 | [diff] [blame] | 1231 | if (!bp) { |
Dave Chinner | 0b932cc | 2011-03-07 10:08:35 +1100 | [diff] [blame] | 1232 | xfs_warn(mp, "last sector read failed"); |
Dave Chinner | 1922c94 | 2010-09-22 10:47:20 +1000 | [diff] [blame] | 1233 | return EIO; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1234 | } |
Dave Chinner | 1922c94 | 2010-09-22 10:47:20 +1000 | [diff] [blame] | 1235 | xfs_buf_relse(bp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1236 | |
Christoph Hellwig | 4249023 | 2008-08-13 16:49:32 +1000 | [diff] [blame] | 1237 | if (mp->m_logdev_targp != mp->m_ddev_targp) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1238 | d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks); |
| 1239 | if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) { |
Dave Chinner | 0b932cc | 2011-03-07 10:08:35 +1100 | [diff] [blame] | 1240 | xfs_warn(mp, "log size mismatch detected"); |
Eric Sandeen | 657a4cf | 2010-04-30 03:42:49 +0000 | [diff] [blame] | 1241 | return XFS_ERROR(EFBIG); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1242 | } |
Dave Chinner | e70b73f | 2012-04-23 15:58:49 +1000 | [diff] [blame] | 1243 | bp = xfs_buf_read_uncached(mp->m_logdev_targp, |
Dave Chinner | 1922c94 | 2010-09-22 10:47:20 +1000 | [diff] [blame] | 1244 | d - XFS_FSB_TO_BB(mp, 1), |
Dave Chinner | c3f8fc7 | 2012-11-12 22:54:01 +1100 | [diff] [blame] | 1245 | XFS_FSB_TO_BB(mp, 1), 0, NULL); |
Dave Chinner | 1922c94 | 2010-09-22 10:47:20 +1000 | [diff] [blame] | 1246 | if (!bp) { |
Dave Chinner | 0b932cc | 2011-03-07 10:08:35 +1100 | [diff] [blame] | 1247 | xfs_warn(mp, "log device read failed"); |
Dave Chinner | 1922c94 | 2010-09-22 10:47:20 +1000 | [diff] [blame] | 1248 | return EIO; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1249 | } |
Dave Chinner | 1922c94 | 2010-09-22 10:47:20 +1000 | [diff] [blame] | 1250 | xfs_buf_relse(bp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1251 | } |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1252 | return 0; |
| 1253 | } |
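/*
 * The uncached reads above are a cheap sanity check: reading the last
 * sector of the data device (and the last block of an external log
 * device) verifies that the underlying devices really are as large as
 * the superblock claims before the mount goes any further.
 */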
| 1254 | |
| 1255 | /* |
Christoph Hellwig | 7d09525 | 2009-06-08 15:33:32 +0200 | [diff] [blame] | 1256 | * Clear the quotaflags in memory and in the superblock. |
| 1257 | */ |
| 1258 | int |
| 1259 | xfs_mount_reset_sbqflags( |
| 1260 | struct xfs_mount *mp) |
| 1261 | { |
| 1262 | int error; |
| 1263 | struct xfs_trans *tp; |
| 1264 | |
| 1265 | mp->m_qflags = 0; |
| 1266 | |
| 1267 | /* |
| 1268 | * It is OK to look at sb_qflags here in mount path, |
| 1269 | * without m_sb_lock. |
| 1270 | */ |
| 1271 | if (mp->m_sb.sb_qflags == 0) |
| 1272 | return 0; |
| 1273 | spin_lock(&mp->m_sb_lock); |
| 1274 | mp->m_sb.sb_qflags = 0; |
| 1275 | spin_unlock(&mp->m_sb_lock); |
| 1276 | |
| 1277 | /* |
| 1278 | * If the fs is readonly, let the incore superblock run |
| 1279 | * with quotas off but don't flush the update out to disk |
| 1280 | */ |
| 1281 | if (mp->m_flags & XFS_MOUNT_RDONLY) |
| 1282 | return 0; |
| 1283 | |
Christoph Hellwig | 7d09525 | 2009-06-08 15:33:32 +0200 | [diff] [blame] | 1284 | tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE); |
Jeff Liu | b0c10b98 | 2013-01-28 21:26:16 +0800 | [diff] [blame] | 1285 | error = xfs_trans_reserve(tp, 0, XFS_QM_SBCHANGE_LOG_RES(mp), |
| 1286 | 0, 0, XFS_DEFAULT_LOG_COUNT); |
Christoph Hellwig | 7d09525 | 2009-06-08 15:33:32 +0200 | [diff] [blame] | 1287 | if (error) { |
| 1288 | xfs_trans_cancel(tp, 0); |
Dave Chinner | 5348778 | 2011-03-07 10:05:35 +1100 | [diff] [blame] | 1289 | xfs_alert(mp, "%s: Superblock update failed!", __func__); |
Christoph Hellwig | 7d09525 | 2009-06-08 15:33:32 +0200 | [diff] [blame] | 1290 | return error; |
| 1291 | } |
| 1292 | |
| 1293 | xfs_mod_sb(tp, XFS_SB_QFLAGS); |
| 1294 | return xfs_trans_commit(tp, 0); |
| 1295 | } |
| 1296 | |
Eric Sandeen | d5db0f9 | 2010-02-05 22:59:53 +0000 | [diff] [blame] | 1297 | __uint64_t |
| 1298 | xfs_default_resblks(xfs_mount_t *mp) |
| 1299 | { |
| 1300 | __uint64_t resblks; |
| 1301 | |
| 1302 | /* |
Dave Chinner | 8babd8a | 2010-03-04 01:46:25 +0000 | [diff] [blame] | 1303 | * We default to 5% or 8192 fsbs of space reserved, whichever is |
| 1304 | * smaller. This is intended to cover concurrent allocation |
| 1305 | * transactions when we initially hit enospc. These each require a 4 |
| 1306 | * block reservation. Hence by default we cover roughly 2000 concurrent |
| 1307 | * allocation reservations. |
Eric Sandeen | d5db0f9 | 2010-02-05 22:59:53 +0000 | [diff] [blame] | 1308 | */ |
| 1309 | resblks = mp->m_sb.sb_dblocks; |
| 1310 | do_div(resblks, 20); |
Dave Chinner | 8babd8a | 2010-03-04 01:46:25 +0000 | [diff] [blame] | 1311 | resblks = min_t(__uint64_t, resblks, 8192); |
Eric Sandeen | d5db0f9 | 2010-02-05 22:59:53 +0000 | [diff] [blame] | 1312 | return resblks; |
| 1313 | } |
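/*
 * Illustrative stand-alone sketch of the same policy (not kernel code,
 * the function name is hypothetical): reserve 5% of the data blocks,
 * capped at 8192 filesystem blocks.
 */
static unsigned long long example_default_resblks(unsigned long long dblocks)
{
	unsigned long long resblks = dblocks / 20;	/* 5% of the data blocks */

	return resblks < 8192 ? resblks : 8192;		/* min(5%, 8192 fsbs) */
}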
| 1314 | |
Christoph Hellwig | 7d09525 | 2009-06-08 15:33:32 +0200 | [diff] [blame] | 1315 | /* |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1316 | * This function does the following on an initial mount of a file system: |
| 1317 |  *	- reads the superblock from disk and initializes the mount struct |
| 1318 |  *	- if we're a 32-bit kernel, does a size check on the superblock |
| 1319 |  *		so we don't mount terabyte filesystems |
| 1320 |  *	- initializes the mount struct realtime fields |
| 1321 |  *	- allocates the inode hash table for the fs |
| 1322 |  *	- initializes the directory manager |
| 1323 |  *	- performs recovery and initializes the log manager |
| 1324 | */ |
| 1325 | int |
| 1326 | xfs_mountfs( |
Christoph Hellwig | 4249023 | 2008-08-13 16:49:32 +1000 | [diff] [blame] | 1327 | xfs_mount_t *mp) |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1328 | { |
| 1329 | xfs_sb_t *sbp = &(mp->m_sb); |
| 1330 | xfs_inode_t *rip; |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1331 | __uint64_t resblks; |
Christoph Hellwig | 7d09525 | 2009-06-08 15:33:32 +0200 | [diff] [blame] | 1332 | uint quotamount = 0; |
| 1333 | uint quotaflags = 0; |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1334 | int error = 0; |
| 1335 | |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1336 | xfs_mount_common(mp, sbp); |
| 1337 | |
| 1338 | /* |
Eric Sandeen | e6957ea | 2008-04-10 12:19:34 +1000 | [diff] [blame] | 1339 |  * Check for mismatched features2 values. Older kernels |
| 1340 |  * read & wrote into the wrong sb offset for sb_features2 |
| 1341 |  * on some platforms due to xfs_sb_t not being 64-bit size aligned |
| 1342 | * when sb_features2 was added, which made older superblock |
| 1343 | * reading/writing routines swap it as a 64-bit value. |
David Chinner | ee1c090 | 2008-03-06 13:45:50 +1100 | [diff] [blame] | 1344 | * |
Eric Sandeen | e6957ea | 2008-04-10 12:19:34 +1000 | [diff] [blame] | 1345 | * For backwards compatibility, we make both slots equal. |
| 1346 | * |
| 1347 | * If we detect a mismatched field, we OR the set bits into the |
| 1348 | * existing features2 field in case it has already been modified; we |
| 1349 | * don't want to lose any features. We then update the bad location |
| 1350 | * with the ORed value so that older kernels will see any features2 |
| 1351 | * flags, and mark the two fields as needing updates once the |
| 1352 | * transaction subsystem is online. |
David Chinner | ee1c090 | 2008-03-06 13:45:50 +1100 | [diff] [blame] | 1353 | */ |
Eric Sandeen | e6957ea | 2008-04-10 12:19:34 +1000 | [diff] [blame] | 1354 | if (xfs_sb_has_mismatched_features2(sbp)) { |
Dave Chinner | 0b932cc | 2011-03-07 10:08:35 +1100 | [diff] [blame] | 1355 | xfs_warn(mp, "correcting sb_features alignment problem"); |
David Chinner | ee1c090 | 2008-03-06 13:45:50 +1100 | [diff] [blame] | 1356 | sbp->sb_features2 |= sbp->sb_bad_features2; |
Eric Sandeen | e6957ea | 2008-04-10 12:19:34 +1000 | [diff] [blame] | 1357 | sbp->sb_bad_features2 = sbp->sb_features2; |
Christoph Hellwig | 7884bc8 | 2009-01-19 02:04:07 +0100 | [diff] [blame] | 1358 | mp->m_update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2; |
Eric Sandeen | e6957ea | 2008-04-10 12:19:34 +1000 | [diff] [blame] | 1359 | |
| 1360 | /* |
| 1361 | * Re-check for ATTR2 in case it was found in bad_features2 |
| 1362 | * slot. |
| 1363 | */ |
Tim Shimmin | 7c12f29 | 2008-04-30 18:15:28 +1000 | [diff] [blame] | 1364 | if (xfs_sb_version_hasattr2(&mp->m_sb) && |
| 1365 | !(mp->m_flags & XFS_MOUNT_NOATTR2)) |
Eric Sandeen | e6957ea | 2008-04-10 12:19:34 +1000 | [diff] [blame] | 1366 | mp->m_flags |= XFS_MOUNT_ATTR2; |
Tim Shimmin | 7c12f29 | 2008-04-30 18:15:28 +1000 | [diff] [blame] | 1367 | } |
Eric Sandeen | e6957ea | 2008-04-10 12:19:34 +1000 | [diff] [blame] | 1368 | |
Tim Shimmin | 7c12f29 | 2008-04-30 18:15:28 +1000 | [diff] [blame] | 1369 | if (xfs_sb_version_hasattr2(&mp->m_sb) && |
| 1370 | (mp->m_flags & XFS_MOUNT_NOATTR2)) { |
| 1371 | xfs_sb_version_removeattr2(&mp->m_sb); |
Christoph Hellwig | 7884bc8 | 2009-01-19 02:04:07 +0100 | [diff] [blame] | 1372 | mp->m_update_flags |= XFS_SB_FEATURES2; |
Tim Shimmin | 7c12f29 | 2008-04-30 18:15:28 +1000 | [diff] [blame] | 1373 | |
| 1374 | /* update sb_versionnum for the clearing of the morebits */ |
| 1375 | if (!sbp->sb_features2) |
Christoph Hellwig | 7884bc8 | 2009-01-19 02:04:07 +0100 | [diff] [blame] | 1376 | mp->m_update_flags |= XFS_SB_VERSIONNUM; |
David Chinner | ee1c090 | 2008-03-06 13:45:50 +1100 | [diff] [blame] | 1377 | } |
| 1378 | |
| 1379 | /* |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1380 | * Check if sb_agblocks is aligned at stripe boundary |
| 1381 | * If sb_agblocks is NOT aligned turn off m_dalign since |
| 1382 | * allocator alignment is within an ag, therefore ag has |
| 1383 | * to be aligned at stripe boundary. |
| 1384 | */ |
Christoph Hellwig | 7884bc8 | 2009-01-19 02:04:07 +0100 | [diff] [blame] | 1385 | error = xfs_update_alignment(mp); |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1386 | if (error) |
Christoph Hellwig | f9057e3 | 2009-02-04 09:31:52 +0100 | [diff] [blame] | 1387 | goto out; |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1388 | |
| 1389 | xfs_alloc_compute_maxlevels(mp); |
| 1390 | xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK); |
| 1391 | xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK); |
| 1392 | xfs_ialloc_compute_maxlevels(mp); |
| 1393 | |
| 1394 | xfs_set_maxicount(mp); |
| 1395 | |
Christoph Hellwig | 2717420 | 2009-03-30 10:21:31 +0200 | [diff] [blame] | 1396 | error = xfs_uuid_mount(mp); |
| 1397 | if (error) |
| 1398 | goto out; |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1399 | |
| 1400 | /* |
| 1401 | * Set the minimum read and write sizes |
| 1402 | */ |
| 1403 | xfs_set_rw_sizes(mp); |
| 1404 | |
Dave Chinner | 055388a | 2011-01-04 11:35:03 +1100 | [diff] [blame] | 1405 | /* set the low space thresholds for dynamic preallocation */ |
| 1406 | xfs_set_low_space_thresholds(mp); |
| 1407 | |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1408 | /* |
| 1409 | * Set the inode cluster size. |
| 1410 | * This may still be overridden by the file system |
| 1411 | * block size if it is larger than the chosen cluster size. |
| 1412 | */ |
| 1413 | mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE; |
| 1414 | |
| 1415 | /* |
| 1416 | * Set inode alignment fields |
| 1417 | */ |
| 1418 | xfs_set_inoalignment(mp); |
| 1419 | |
| 1420 | /* |
| 1421 | * Check that the data (and log if separate) are an ok size. |
| 1422 | */ |
Christoph Hellwig | 4249023 | 2008-08-13 16:49:32 +1000 | [diff] [blame] | 1423 | error = xfs_check_sizes(mp); |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1424 | if (error) |
Christoph Hellwig | f9057e3 | 2009-02-04 09:31:52 +0100 | [diff] [blame] | 1425 | goto out_remove_uuid; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1426 | |
| 1427 | /* |
| 1428 | * Initialize realtime fields in the mount structure |
| 1429 | */ |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1430 | error = xfs_rtmount_init(mp); |
| 1431 | if (error) { |
Dave Chinner | 0b932cc | 2011-03-07 10:08:35 +1100 | [diff] [blame] | 1432 | xfs_warn(mp, "RT mount failed"); |
Christoph Hellwig | f9057e3 | 2009-02-04 09:31:52 +0100 | [diff] [blame] | 1433 | goto out_remove_uuid; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1434 | } |
| 1435 | |
| 1436 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1437 | * Copies the low order bits of the timestamp and the randomly |
| 1438 | * set "sequence" number out of a UUID. |
| 1439 | */ |
| 1440 | uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid); |
| 1441 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1442 | mp->m_dmevmask = 0; /* not persistent; set after each mount */ |
| 1443 | |
Nathan Scott | f6c2d1f | 2006-06-20 13:04:51 +1000 | [diff] [blame] | 1444 | xfs_dir_mount(mp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1445 | |
| 1446 | /* |
| 1447 | * Initialize the attribute manager's entries. |
| 1448 | */ |
| 1449 | mp->m_attr_magicpct = (mp->m_sb.sb_blocksize * 37) / 100; |
| 1450 | |
| 1451 | /* |
| 1452 | * Initialize the precomputed transaction reservations values. |
| 1453 | */ |
| 1454 | xfs_trans_init(mp); |
| 1455 | |
| 1456 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1457 | * Allocate and initialize the per-ag data. |
| 1458 | */ |
Dave Chinner | 1c1c6eb | 2010-01-11 11:47:44 +0000 | [diff] [blame] | 1459 | spin_lock_init(&mp->m_perag_lock); |
Dave Chinner | 9b98b6f | 2010-05-27 01:58:13 +0000 | [diff] [blame] | 1460 | INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC); |
Dave Chinner | 1c1c6eb | 2010-01-11 11:47:44 +0000 | [diff] [blame] | 1461 | error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi); |
| 1462 | if (error) { |
Dave Chinner | 0b932cc | 2011-03-07 10:08:35 +1100 | [diff] [blame] | 1463 | xfs_warn(mp, "Failed per-ag init: %d", error); |
Christoph Hellwig | f9057e3 | 2009-02-04 09:31:52 +0100 | [diff] [blame] | 1464 | goto out_remove_uuid; |
Dave Chinner | 1c1c6eb | 2010-01-11 11:47:44 +0000 | [diff] [blame] | 1465 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1466 | |
Christoph Hellwig | f9057e3 | 2009-02-04 09:31:52 +0100 | [diff] [blame] | 1467 | if (!sbp->sb_logblocks) { |
Dave Chinner | 0b932cc | 2011-03-07 10:08:35 +1100 | [diff] [blame] | 1468 | xfs_warn(mp, "no log defined"); |
Christoph Hellwig | f9057e3 | 2009-02-04 09:31:52 +0100 | [diff] [blame] | 1469 | XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp); |
| 1470 | error = XFS_ERROR(EFSCORRUPTED); |
| 1471 | goto out_free_perag; |
| 1472 | } |
| 1473 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1474 | /* |
| 1475 | * log's mount-time initialization. Perform 1st part recovery if needed |
| 1476 | */ |
Christoph Hellwig | f9057e3 | 2009-02-04 09:31:52 +0100 | [diff] [blame] | 1477 | error = xfs_log_mount(mp, mp->m_logdev_targp, |
| 1478 | XFS_FSB_TO_DADDR(mp, sbp->sb_logstart), |
| 1479 | XFS_FSB_TO_BB(mp, sbp->sb_logblocks)); |
| 1480 | if (error) { |
Dave Chinner | 0b932cc | 2011-03-07 10:08:35 +1100 | [diff] [blame] | 1481 | xfs_warn(mp, "log mount failed"); |
Dave Chinner | d4f3512 | 2012-04-23 15:59:06 +1000 | [diff] [blame] | 1482 | goto out_fail_wait; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1483 | } |
| 1484 | |
| 1485 | /* |
David Chinner | 92821e2 | 2007-05-24 15:26:31 +1000 | [diff] [blame] | 1486 | * Now the log is mounted, we know if it was an unclean shutdown or |
| 1487 |  * not. If it was, the first phase of recovery has completed and we |
| 1488 | * have consistent AG blocks on disk. We have not recovered EFIs yet, |
| 1489 | * but they are recovered transactionally in the second recovery phase |
| 1490 | * later. |
| 1491 | * |
| 1492 | * Hence we can safely re-initialise incore superblock counters from |
| 1493 | * the per-ag data. These may not be correct if the filesystem was not |
| 1494 | * cleanly unmounted, so we need to wait for recovery to finish before |
| 1495 | * doing this. |
| 1496 | * |
| 1497 | * If the filesystem was cleanly unmounted, then we can trust the |
| 1498 | * values in the superblock to be correct and we don't need to do |
| 1499 | * anything here. |
| 1500 | * |
| 1501 | * If we are currently making the filesystem, the initialisation will |
| 1502 | * fail as the perag data is in an undefined state. |
| 1503 | */ |
David Chinner | 92821e2 | 2007-05-24 15:26:31 +1000 | [diff] [blame] | 1504 | if (xfs_sb_version_haslazysbcount(&mp->m_sb) && |
| 1505 | !XFS_LAST_UNMOUNT_WAS_CLEAN(mp) && |
| 1506 | !mp->m_sb.sb_inprogress) { |
| 1507 | error = xfs_initialize_perag_data(mp, sbp->sb_agcount); |
Christoph Hellwig | f9057e3 | 2009-02-04 09:31:52 +0100 | [diff] [blame] | 1508 | if (error) |
Dave Chinner | d4f3512 | 2012-04-23 15:59:06 +1000 | [diff] [blame] | 1509 | goto out_fail_wait; |
David Chinner | 92821e2 | 2007-05-24 15:26:31 +1000 | [diff] [blame] | 1510 | } |
Christoph Hellwig | f9057e3 | 2009-02-04 09:31:52 +0100 | [diff] [blame] | 1511 | |
David Chinner | 92821e2 | 2007-05-24 15:26:31 +1000 | [diff] [blame] | 1512 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1513 | * Get and sanity-check the root inode. |
| 1514 | * Save the pointer to it in the mount structure. |
| 1515 | */ |
Dave Chinner | 7b6259e | 2010-06-24 11:35:17 +1000 | [diff] [blame] | 1516 | error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1517 | if (error) { |
Dave Chinner | 0b932cc | 2011-03-07 10:08:35 +1100 | [diff] [blame] | 1518 | xfs_warn(mp, "failed to read root inode"); |
Christoph Hellwig | f9057e3 | 2009-02-04 09:31:52 +0100 | [diff] [blame] | 1519 | goto out_log_dealloc; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1520 | } |
| 1521 | |
| 1522 | ASSERT(rip != NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1523 | |
Al Viro | abbede1 | 2011-07-26 02:31:30 -0400 | [diff] [blame] | 1524 | if (unlikely(!S_ISDIR(rip->i_d.di_mode))) { |
Dave Chinner | 0b932cc | 2011-03-07 10:08:35 +1100 | [diff] [blame] | 1525 | xfs_warn(mp, "corrupted root inode %llu: not a directory", |
Nathan Scott | b657452 | 2006-06-09 15:29:40 +1000 | [diff] [blame] | 1526 | (unsigned long long)rip->i_ino); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1527 | xfs_iunlock(rip, XFS_ILOCK_EXCL); |
| 1528 | XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW, |
| 1529 | mp); |
| 1530 | error = XFS_ERROR(EFSCORRUPTED); |
Christoph Hellwig | f9057e3 | 2009-02-04 09:31:52 +0100 | [diff] [blame] | 1531 | goto out_rele_rip; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1532 | } |
| 1533 | mp->m_rootip = rip; /* save it */ |
| 1534 | |
| 1535 | xfs_iunlock(rip, XFS_ILOCK_EXCL); |
| 1536 | |
| 1537 | /* |
| 1538 | * Initialize realtime inode pointers in the mount structure |
| 1539 | */ |
Eric Sandeen | 0771fb4 | 2007-10-12 11:03:40 +1000 | [diff] [blame] | 1540 | error = xfs_rtmount_inodes(mp); |
| 1541 | if (error) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1542 | /* |
| 1543 | * Free up the root inode. |
| 1544 | */ |
Dave Chinner | 0b932cc | 2011-03-07 10:08:35 +1100 | [diff] [blame] | 1545 | xfs_warn(mp, "failed to read RT inodes"); |
Christoph Hellwig | f9057e3 | 2009-02-04 09:31:52 +0100 | [diff] [blame] | 1546 | goto out_rele_rip; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1547 | } |
| 1548 | |
| 1549 | /* |
Christoph Hellwig | 7884bc8 | 2009-01-19 02:04:07 +0100 | [diff] [blame] | 1550 |  * If this is a read-only mount, defer the superblock updates until |
| 1551 | * the next remount into writeable mode. Otherwise we would never |
| 1552 | * perform the update e.g. for the root filesystem. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1553 | */ |
Christoph Hellwig | 7884bc8 | 2009-01-19 02:04:07 +0100 | [diff] [blame] | 1554 | if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) { |
| 1555 | error = xfs_mount_log_sb(mp, mp->m_update_flags); |
David Chinner | e5720ee | 2008-04-10 12:21:18 +1000 | [diff] [blame] | 1556 | if (error) { |
Dave Chinner | 0b932cc | 2011-03-07 10:08:35 +1100 | [diff] [blame] | 1557 | xfs_warn(mp, "failed to write sb changes"); |
Christoph Hellwig | b93b6e4 | 2009-02-04 09:33:58 +0100 | [diff] [blame] | 1558 | goto out_rtunmount; |
David Chinner | e5720ee | 2008-04-10 12:21:18 +1000 | [diff] [blame] | 1559 | } |
| 1560 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1561 | |
| 1562 | /* |
| 1563 | * Initialise the XFS quota management subsystem for this mount |
| 1564 | */ |
Christoph Hellwig | 7d09525 | 2009-06-08 15:33:32 +0200 | [diff] [blame] | 1565 | if (XFS_IS_QUOTA_RUNNING(mp)) { |
| 1566 | error = xfs_qm_newmount(mp, "amount, "aflags); |
| 1567 | if (error) |
| 1568 | goto out_rtunmount; |
| 1569 | } else { |
| 1570 | ASSERT(!XFS_IS_QUOTA_ON(mp)); |
| 1571 | |
| 1572 | /* |
| 1573 | * If a file system had quotas running earlier, but decided to |
| 1574 | * mount without -o uquota/pquota/gquota options, revoke the |
| 1575 | * quotachecked license. |
| 1576 | */ |
| 1577 | if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) { |
Dave Chinner | 0b932cc | 2011-03-07 10:08:35 +1100 | [diff] [blame] | 1578 | xfs_notice(mp, "resetting quota flags"); |
Christoph Hellwig | 7d09525 | 2009-06-08 15:33:32 +0200 | [diff] [blame] | 1579 | error = xfs_mount_reset_sbqflags(mp); |
| 1580 | if (error) |
| 1581 | return error; |
| 1582 | } |
| 1583 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1584 | |
| 1585 | /* |
| 1586 | * Finish recovering the file system. This part needed to be |
| 1587 | * delayed until after the root and real-time bitmap inodes |
| 1588 | * were consistently read in. |
| 1589 | */ |
Christoph Hellwig | 4249023 | 2008-08-13 16:49:32 +1000 | [diff] [blame] | 1590 | error = xfs_log_mount_finish(mp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1591 | if (error) { |
Dave Chinner | 0b932cc | 2011-03-07 10:08:35 +1100 | [diff] [blame] | 1592 | xfs_warn(mp, "log mount finish failed"); |
Christoph Hellwig | b93b6e4 | 2009-02-04 09:33:58 +0100 | [diff] [blame] | 1593 | goto out_rtunmount; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1594 | } |
| 1595 | |
| 1596 | /* |
| 1597 | * Complete the quota initialisation, post-log-replay component. |
| 1598 | */ |
Christoph Hellwig | 7d09525 | 2009-06-08 15:33:32 +0200 | [diff] [blame] | 1599 | if (quotamount) { |
| 1600 | ASSERT(mp->m_qflags == 0); |
| 1601 | mp->m_qflags = quotaflags; |
| 1602 | |
| 1603 | xfs_qm_mount_quotas(mp); |
| 1604 | } |
| 1605 | |
David Chinner | 84e1e99 | 2007-06-18 16:50:27 +1000 | [diff] [blame] | 1606 | /* |
| 1607 | * Now we are mounted, reserve a small amount of unused space for |
| 1608 | * privileged transactions. This is needed so that transaction |
| 1609 | * space required for critical operations can dip into this pool |
| 1610 | * when at ENOSPC. This is needed for operations like create with |
| 1611 | * attr, unwritten extent conversion at ENOSPC, etc. Data allocations |
| 1612 | * are not allowed to use this reserved space. |
Dave Chinner | 8babd8a | 2010-03-04 01:46:25 +0000 | [diff] [blame] | 1613 | * |
| 1614 | * This may drive us straight to ENOSPC on mount, but that implies |
| 1615 | * we were already there on the last unmount. Warn if this occurs. |
David Chinner | 84e1e99 | 2007-06-18 16:50:27 +1000 | [diff] [blame] | 1616 | */ |
Eric Sandeen | d5db0f9 | 2010-02-05 22:59:53 +0000 | [diff] [blame] | 1617 | if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { |
| 1618 | resblks = xfs_default_resblks(mp); |
| 1619 | error = xfs_reserve_blocks(mp, &resblks, NULL); |
| 1620 | if (error) |
Dave Chinner | 0b932cc | 2011-03-07 10:08:35 +1100 | [diff] [blame] | 1621 | xfs_warn(mp, |
| 1622 | "Unable to allocate reserve blocks. Continuing without reserve pool."); |
Eric Sandeen | d5db0f9 | 2010-02-05 22:59:53 +0000 | [diff] [blame] | 1623 | } |
David Chinner | 84e1e99 | 2007-06-18 16:50:27 +1000 | [diff] [blame] | 1624 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1625 | return 0; |
| 1626 | |
Christoph Hellwig | b93b6e4 | 2009-02-04 09:33:58 +0100 | [diff] [blame] | 1627 | out_rtunmount: |
| 1628 | xfs_rtunmount_inodes(mp); |
Christoph Hellwig | f9057e3 | 2009-02-04 09:31:52 +0100 | [diff] [blame] | 1629 | out_rele_rip: |
Christoph Hellwig | 4335509 | 2008-03-27 18:01:08 +1100 | [diff] [blame] | 1630 | IRELE(rip); |
Christoph Hellwig | f9057e3 | 2009-02-04 09:31:52 +0100 | [diff] [blame] | 1631 | out_log_dealloc: |
Christoph Hellwig | 21b699c | 2009-03-16 08:19:29 +0100 | [diff] [blame] | 1632 | xfs_log_unmount(mp); |
Dave Chinner | d4f3512 | 2012-04-23 15:59:06 +1000 | [diff] [blame] | 1633 | out_fail_wait: |
| 1634 | if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) |
| 1635 | xfs_wait_buftarg(mp->m_logdev_targp); |
| 1636 | xfs_wait_buftarg(mp->m_ddev_targp); |
Christoph Hellwig | f9057e3 | 2009-02-04 09:31:52 +0100 | [diff] [blame] | 1637 | out_free_perag: |
Christoph Hellwig | ff4f038 | 2008-08-13 16:50:47 +1000 | [diff] [blame] | 1638 | xfs_free_perag(mp); |
Christoph Hellwig | f9057e3 | 2009-02-04 09:31:52 +0100 | [diff] [blame] | 1639 | out_remove_uuid: |
Christoph Hellwig | 2717420 | 2009-03-30 10:21:31 +0200 | [diff] [blame] | 1640 | xfs_uuid_unmount(mp); |
Christoph Hellwig | f9057e3 | 2009-02-04 09:31:52 +0100 | [diff] [blame] | 1641 | out: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1642 | return error; |
| 1643 | } |
| 1644 | |
| 1645 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1646 |  * This flushes out the inodes, dquots and the superblock, unmounts the |
| 1647 | * log and makes sure that incore structures are freed. |
| 1648 | */ |
Christoph Hellwig | 41b5c2e | 2008-08-13 16:49:57 +1000 | [diff] [blame] | 1649 | void |
| 1650 | xfs_unmountfs( |
| 1651 | struct xfs_mount *mp) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1652 | { |
Christoph Hellwig | 41b5c2e | 2008-08-13 16:49:57 +1000 | [diff] [blame] | 1653 | __uint64_t resblks; |
| 1654 | int error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1655 | |
Brian Foster | 579b62f | 2012-11-06 09:50:47 -0500 | [diff] [blame] | 1656 | cancel_delayed_work_sync(&mp->m_eofblocks_work); |
| 1657 | |
Christoph Hellwig | 7d09525 | 2009-06-08 15:33:32 +0200 | [diff] [blame] | 1658 | xfs_qm_unmount_quotas(mp); |
Christoph Hellwig | b93b6e4 | 2009-02-04 09:33:58 +0100 | [diff] [blame] | 1659 | xfs_rtunmount_inodes(mp); |
Christoph Hellwig | 77508ec | 2008-08-13 16:49:04 +1000 | [diff] [blame] | 1660 | IRELE(mp->m_rootip); |
| 1661 | |
David Chinner | 641c56f | 2007-06-18 16:50:17 +1000 | [diff] [blame] | 1662 | /* |
| 1663 | * We can potentially deadlock here if we have an inode cluster |
Malcolm Parsons | 9da096f | 2009-03-29 09:55:42 +0200 | [diff] [blame] | 1664 |  * that has been freed and has its buffer still pinned in memory because |
David Chinner | 641c56f | 2007-06-18 16:50:17 +1000 | [diff] [blame] | 1665 |  * the transaction is still sitting in an iclog. The stale inodes |
| 1666 |  * on that buffer will have their flush locks held until the |
| 1667 |  * transaction hits the disk and the callbacks run. The inode |
| 1668 |  * flush takes the flush lock unconditionally and with nothing to |
| 1669 |  * push out the iclog we will never get that unlocked. Hence we |
| 1670 | * need to force the log first. |
| 1671 | */ |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 1672 | xfs_log_force(mp, XFS_LOG_SYNC); |
Dave Chinner | c854363 | 2010-02-06 12:39:36 +1100 | [diff] [blame] | 1673 | |
| 1674 | /* |
Christoph Hellwig | 211e4d4 | 2012-04-23 15:58:34 +1000 | [diff] [blame] | 1675 | * Flush all pending changes from the AIL. |
Dave Chinner | c854363 | 2010-02-06 12:39:36 +1100 | [diff] [blame] | 1676 | */ |
Christoph Hellwig | 211e4d4 | 2012-04-23 15:58:34 +1000 | [diff] [blame] | 1677 | xfs_ail_push_all_sync(mp->m_ail); |
| 1678 | |
| 1679 | /* |
| 1680 | * And reclaim all inodes. At this point there should be no dirty |
Dave Chinner | 7e18530 | 2012-10-08 21:56:00 +1100 | [diff] [blame] | 1681 | * inodes and none should be pinned or locked, but use synchronous |
| 1682 | * reclaim just to be sure. We can stop background inode reclaim |
| 1683 | * here as well if it is still running. |
Christoph Hellwig | 211e4d4 | 2012-04-23 15:58:34 +1000 | [diff] [blame] | 1684 | */ |
Dave Chinner | 7e18530 | 2012-10-08 21:56:00 +1100 | [diff] [blame] | 1685 | cancel_delayed_work_sync(&mp->m_reclaim_work); |
Dave Chinner | c854363 | 2010-02-06 12:39:36 +1100 | [diff] [blame] | 1686 | xfs_reclaim_inodes(mp, SYNC_WAIT); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1687 | |
Christoph Hellwig | 7d09525 | 2009-06-08 15:33:32 +0200 | [diff] [blame] | 1688 | xfs_qm_unmount(mp); |
Lachlan McIlroy | a357a12 | 2008-10-30 16:53:25 +1100 | [diff] [blame] | 1689 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1690 | /* |
David Chinner | 84e1e99 | 2007-06-18 16:50:27 +1000 | [diff] [blame] | 1691 | * Unreserve any blocks we have so that when we unmount we don't account |
| 1692 | * the reserved free space as used. This is really only necessary for |
| 1693 | * lazy superblock counting because it trusts the incore superblock |
Malcolm Parsons | 9da096f | 2009-03-29 09:55:42 +0200 | [diff] [blame] | 1694 | * counters to be absolutely correct on clean unmount. |
David Chinner | 84e1e99 | 2007-06-18 16:50:27 +1000 | [diff] [blame] | 1695 | * |
| 1696 | * We don't bother correcting this elsewhere for lazy superblock |
| 1697 | * counting because on mount of an unclean filesystem we reconstruct the |
| 1698 | * correct counter value and this is irrelevant. |
| 1699 | * |
| 1700 | * For non-lazy counter filesystems, this doesn't matter at all because |
| 1701 |  * we only ever apply deltas to the superblock and hence the incore |
| 1702 | * value does not matter.... |
| 1703 | */ |
| 1704 | resblks = 0; |
David Chinner | 714082b | 2008-04-10 12:20:03 +1000 | [diff] [blame] | 1705 | error = xfs_reserve_blocks(mp, &resblks, NULL); |
| 1706 | if (error) |
Dave Chinner | 0b932cc | 2011-03-07 10:08:35 +1100 | [diff] [blame] | 1707 | xfs_warn(mp, "Unable to free reserved block pool. " |
David Chinner | 714082b | 2008-04-10 12:20:03 +1000 | [diff] [blame] | 1708 | "Freespace may not be correct on next mount."); |
| 1709 | |
Chandra Seetharaman | adab0f6 | 2011-06-29 22:10:14 +0000 | [diff] [blame] | 1710 | error = xfs_log_sbcount(mp); |
David Chinner | e5720ee | 2008-04-10 12:21:18 +1000 | [diff] [blame] | 1711 | if (error) |
Dave Chinner | 0b932cc | 2011-03-07 10:08:35 +1100 | [diff] [blame] | 1712 | xfs_warn(mp, "Unable to update superblock counters. " |
David Chinner | e5720ee | 2008-04-10 12:21:18 +1000 | [diff] [blame] | 1713 | "Freespace may not be correct on next mount."); |
Christoph Hellwig | 87c7bec | 2011-09-14 14:08:26 +0000 | [diff] [blame] | 1714 | |
Christoph Hellwig | 21b699c | 2009-03-16 08:19:29 +0100 | [diff] [blame] | 1715 | xfs_log_unmount(mp); |
Christoph Hellwig | 2717420 | 2009-03-30 10:21:31 +0200 | [diff] [blame] | 1716 | xfs_uuid_unmount(mp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1717 | |
Christoph Hellwig | 1550d0b | 2008-08-13 16:17:37 +1000 | [diff] [blame] | 1718 | #if defined(DEBUG) |
Christoph Hellwig | 0ce4cfd | 2007-08-30 17:20:53 +1000 | [diff] [blame] | 1719 | xfs_errortag_clearall(mp, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1720 | #endif |
Christoph Hellwig | ff4f038 | 2008-08-13 16:50:47 +1000 | [diff] [blame] | 1721 | xfs_free_perag(mp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1722 | } |
| 1723 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1724 | int |
David Chinner | 92821e2 | 2007-05-24 15:26:31 +1000 | [diff] [blame] | 1725 | xfs_fs_writable(xfs_mount_t *mp) |
| 1726 | { |
Jan Kara | d9457dc | 2012-06-12 16:20:39 +0200 | [diff] [blame] | 1727 | return !(mp->m_super->s_writers.frozen || XFS_FORCED_SHUTDOWN(mp) || |
Christoph Hellwig | bd186aa | 2007-08-30 17:21:12 +1000 | [diff] [blame] | 1728 | (mp->m_flags & XFS_MOUNT_RDONLY)); |
David Chinner | 92821e2 | 2007-05-24 15:26:31 +1000 | [diff] [blame] | 1729 | } |
| 1730 | |
| 1731 | /* |
Alex Elder | b2ce397 | 2011-07-11 09:51:44 -0500 | [diff] [blame] | 1732 | * xfs_log_sbcount |
| 1733 | * |
Chandra Seetharaman | adab0f6 | 2011-06-29 22:10:14 +0000 | [diff] [blame] | 1734 | * Sync the superblock counters to disk. |
Alex Elder | b2ce397 | 2011-07-11 09:51:44 -0500 | [diff] [blame] | 1735 | * |
| 1736 | * Note this code can be called during the process of freezing, so |
Chandra Seetharaman | adab0f6 | 2011-06-29 22:10:14 +0000 | [diff] [blame] | 1737 | * we may need to use the transaction allocator which does not |
Alex Elder | b2ce397 | 2011-07-11 09:51:44 -0500 | [diff] [blame] | 1738 | * block when the transaction subsystem is in its frozen state. |
David Chinner | 92821e2 | 2007-05-24 15:26:31 +1000 | [diff] [blame] | 1739 | */ |
| 1740 | int |
Chandra Seetharaman | adab0f6 | 2011-06-29 22:10:14 +0000 | [diff] [blame] | 1741 | xfs_log_sbcount(xfs_mount_t *mp) |
David Chinner | 92821e2 | 2007-05-24 15:26:31 +1000 | [diff] [blame] | 1742 | { |
| 1743 | xfs_trans_t *tp; |
| 1744 | int error; |
| 1745 | |
| 1746 | if (!xfs_fs_writable(mp)) |
| 1747 | return 0; |
| 1748 | |
Christoph Hellwig | d4d90b5 | 2008-04-22 17:34:37 +1000 | [diff] [blame] | 1749 | xfs_icsb_sync_counters(mp, 0); |
David Chinner | 92821e2 | 2007-05-24 15:26:31 +1000 | [diff] [blame] | 1750 | |
| 1751 | /* |
| 1752 | * we don't need to do this if we are updating the superblock |
| 1753 | * counters on every modification. |
| 1754 | */ |
| 1755 | if (!xfs_sb_version_haslazysbcount(&mp->m_sb)) |
| 1756 | return 0; |
| 1757 | |
Alex Elder | b2ce397 | 2011-07-11 09:51:44 -0500 | [diff] [blame] | 1758 | tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP); |
Jeff Liu | e457274 | 2013-01-28 21:27:31 +0800 | [diff] [blame] | 1759 | error = xfs_trans_reserve(tp, 0, XFS_SB_LOG_RES(mp), 0, 0, |
| 1760 | XFS_DEFAULT_LOG_COUNT); |
David Chinner | 92821e2 | 2007-05-24 15:26:31 +1000 | [diff] [blame] | 1761 | if (error) { |
| 1762 | xfs_trans_cancel(tp, 0); |
| 1763 | return error; |
| 1764 | } |
| 1765 | |
| 1766 | xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS); |
Chandra Seetharaman | adab0f6 | 2011-06-29 22:10:14 +0000 | [diff] [blame] | 1767 | xfs_trans_set_sync(tp); |
David Chinner | e5720ee | 2008-04-10 12:21:18 +1000 | [diff] [blame] | 1768 | error = xfs_trans_commit(tp, 0); |
| 1769 | return error; |
David Chinner | 92821e2 | 2007-05-24 15:26:31 +1000 | [diff] [blame] | 1770 | } |
| 1771 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1772 | /* |
| 1773 | * xfs_mod_sb() can be used to copy arbitrary changes to the |
| 1774 | * in-core superblock into the superblock buffer to be logged. |
| 1775 | * It does not provide the higher level of locking that is |
| 1776 | * needed to protect the in-core superblock from concurrent |
| 1777 | * access. |
| 1778 | */ |
| 1779 | void |
| 1780 | xfs_mod_sb(xfs_trans_t *tp, __int64_t fields) |
| 1781 | { |
| 1782 | xfs_buf_t *bp; |
| 1783 | int first; |
| 1784 | int last; |
| 1785 | xfs_mount_t *mp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1786 | xfs_sb_field_t f; |
| 1787 | |
| 1788 | ASSERT(fields); |
| 1789 | if (!fields) |
| 1790 | return; |
| 1791 | mp = tp->t_mountp; |
| 1792 | bp = xfs_trans_getsb(tp, mp, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1793 | first = sizeof(xfs_sb_t); |
| 1794 | last = 0; |
| 1795 | |
| 1796 | /* translate/copy */ |
| 1797 | |
Christoph Hellwig | 2bdf7cd | 2007-08-28 13:58:06 +1000 | [diff] [blame] | 1798 | xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, fields); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1799 | |
| 1800 | /* find modified range */ |
Dave Chinner | 587aa0f | 2010-01-20 12:04:53 +1100 | [diff] [blame] | 1801 | f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields); |
| 1802 | ASSERT((1LL << f) & XFS_SB_MOD_BITS); |
| 1803 | last = xfs_sb_info[f + 1].offset - 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1804 | |
| 1805 | f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields); |
| 1806 | ASSERT((1LL << f) & XFS_SB_MOD_BITS); |
| 1807 | first = xfs_sb_info[f].offset; |
| 1808 | |
Dave Chinner | 04a1e6c | 2013-04-03 16:11:31 +1100 | [diff] [blame] | 1809 | xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1810 | xfs_trans_log_buf(tp, bp, first, last); |
| 1811 | } |
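/*
 * Illustrative example: if fields is XFS_SB_UNIT | XFS_SB_WIDTH, the
 * lowbit lookup above resolves 'first' to the byte offset of sb_unit and
 * the highbit lookup resolves 'last' to the byte just before the field
 * that follows sb_width, so only that contiguous range of the on-disk
 * superblock is logged rather than the whole buffer.
 */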
Yingping Lu | d210a28 | 2006-06-09 14:55:18 +1000 | [diff] [blame] | 1812 | |
Yingping Lu | d210a28 | 2006-06-09 14:55:18 +1000 | [diff] [blame] | 1813 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1814 | /* |
| 1815 |  * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply |
| 1816 | * a delta to a specified field in the in-core superblock. Simply |
| 1817 | * switch on the field indicated and apply the delta to that field. |
| 1818 | * Fields are not allowed to dip below zero, so if the delta would |
| 1819 |  * do this, do not apply it and return EINVAL. |
| 1820 | * |
Eric Sandeen | 3685c2a | 2007-10-11 17:42:32 +1000 | [diff] [blame] | 1821 | * The m_sb_lock must be held when this routine is called. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1822 | */ |
Eric Sandeen | d96f8f8 | 2009-07-02 00:09:33 -0500 | [diff] [blame] | 1823 | STATIC int |
David Chinner | 20f4ebf | 2007-02-10 18:36:10 +1100 | [diff] [blame] | 1824 | xfs_mod_incore_sb_unlocked( |
| 1825 | xfs_mount_t *mp, |
| 1826 | xfs_sb_field_t field, |
| 1827 | int64_t delta, |
| 1828 | int rsvd) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1829 | { |
| 1830 | int scounter; /* short counter for 32 bit fields */ |
| 1831 | long long lcounter; /* long counter for 64 bit fields */ |
| 1832 | long long res_used, rem; |
| 1833 | |
| 1834 | /* |
| 1835 | * With the in-core superblock spin lock held, switch |
| 1836 | * on the indicated field. Apply the delta to the |
| 1837 |  * proper field. If the field's value would dip below |
| 1838 | * 0, then do not apply the delta and return EINVAL. |
| 1839 | */ |
| 1840 | switch (field) { |
| 1841 | case XFS_SBS_ICOUNT: |
| 1842 | lcounter = (long long)mp->m_sb.sb_icount; |
| 1843 | lcounter += delta; |
| 1844 | if (lcounter < 0) { |
| 1845 | ASSERT(0); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1846 | return XFS_ERROR(EINVAL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1847 | } |
| 1848 | mp->m_sb.sb_icount = lcounter; |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1849 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1850 | case XFS_SBS_IFREE: |
| 1851 | lcounter = (long long)mp->m_sb.sb_ifree; |
| 1852 | lcounter += delta; |
| 1853 | if (lcounter < 0) { |
| 1854 | ASSERT(0); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1855 | return XFS_ERROR(EINVAL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1856 | } |
| 1857 | mp->m_sb.sb_ifree = lcounter; |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1858 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1859 | case XFS_SBS_FDBLOCKS: |
David Chinner | 4be536d | 2006-09-07 14:26:50 +1000 | [diff] [blame] | 1860 | lcounter = (long long) |
| 1861 | mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1862 | res_used = (long long)(mp->m_resblks - mp->m_resblks_avail); |
| 1863 | |
| 1864 | if (delta > 0) { /* Putting blocks back */ |
| 1865 | if (res_used > delta) { |
| 1866 | mp->m_resblks_avail += delta; |
| 1867 | } else { |
| 1868 | rem = delta - res_used; |
| 1869 | mp->m_resblks_avail = mp->m_resblks; |
| 1870 | lcounter += rem; |
| 1871 | } |
| 1872 | } else { /* Taking blocks away */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1873 | lcounter += delta; |
Dave Chinner | 8babd8a | 2010-03-04 01:46:25 +0000 | [diff] [blame] | 1874 | if (lcounter >= 0) { |
| 1875 | mp->m_sb.sb_fdblocks = lcounter + |
| 1876 | XFS_ALLOC_SET_ASIDE(mp); |
| 1877 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1878 | } |
Dave Chinner | 8babd8a | 2010-03-04 01:46:25 +0000 | [diff] [blame] | 1879 | |
| 1880 | /* |
| 1881 | * We are out of blocks, use any available reserved |
| 1882 |  * blocks if we're allowed to. |
| 1883 | */ |
| 1884 | if (!rsvd) |
| 1885 | return XFS_ERROR(ENOSPC); |
| 1886 | |
| 1887 | lcounter = (long long)mp->m_resblks_avail + delta; |
| 1888 | if (lcounter >= 0) { |
| 1889 | mp->m_resblks_avail = lcounter; |
| 1890 | return 0; |
| 1891 | } |
| 1892 | printk_once(KERN_WARNING |
| 1893 | "Filesystem \"%s\": reserve blocks depleted! " |
| 1894 | "Consider increasing reserve pool size.", |
| 1895 | mp->m_fsname); |
| 1896 | return XFS_ERROR(ENOSPC); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1897 | } |
| 1898 | |
David Chinner | 4be536d | 2006-09-07 14:26:50 +1000 | [diff] [blame] | 1899 | mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1900 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1901 | case XFS_SBS_FREXTENTS: |
| 1902 | lcounter = (long long)mp->m_sb.sb_frextents; |
| 1903 | lcounter += delta; |
| 1904 | if (lcounter < 0) { |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1905 | return XFS_ERROR(ENOSPC); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1906 | } |
| 1907 | mp->m_sb.sb_frextents = lcounter; |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1908 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1909 | case XFS_SBS_DBLOCKS: |
| 1910 | lcounter = (long long)mp->m_sb.sb_dblocks; |
| 1911 | lcounter += delta; |
| 1912 | if (lcounter < 0) { |
| 1913 | ASSERT(0); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1914 | return XFS_ERROR(EINVAL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1915 | } |
| 1916 | mp->m_sb.sb_dblocks = lcounter; |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1917 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1918 | case XFS_SBS_AGCOUNT: |
| 1919 | scounter = mp->m_sb.sb_agcount; |
| 1920 | scounter += delta; |
| 1921 | if (scounter < 0) { |
| 1922 | ASSERT(0); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1923 | return XFS_ERROR(EINVAL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1924 | } |
| 1925 | mp->m_sb.sb_agcount = scounter; |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1926 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1927 | case XFS_SBS_IMAX_PCT: |
| 1928 | scounter = mp->m_sb.sb_imax_pct; |
| 1929 | scounter += delta; |
| 1930 | if (scounter < 0) { |
| 1931 | ASSERT(0); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1932 | return XFS_ERROR(EINVAL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1933 | } |
| 1934 | mp->m_sb.sb_imax_pct = scounter; |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1935 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1936 | case XFS_SBS_REXTSIZE: |
| 1937 | scounter = mp->m_sb.sb_rextsize; |
| 1938 | scounter += delta; |
| 1939 | if (scounter < 0) { |
| 1940 | ASSERT(0); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1941 | return XFS_ERROR(EINVAL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1942 | } |
| 1943 | mp->m_sb.sb_rextsize = scounter; |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1944 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1945 | case XFS_SBS_RBMBLOCKS: |
| 1946 | scounter = mp->m_sb.sb_rbmblocks; |
| 1947 | scounter += delta; |
| 1948 | if (scounter < 0) { |
| 1949 | ASSERT(0); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1950 | return XFS_ERROR(EINVAL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1951 | } |
| 1952 | mp->m_sb.sb_rbmblocks = scounter; |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1953 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1954 | case XFS_SBS_RBLOCKS: |
| 1955 | lcounter = (long long)mp->m_sb.sb_rblocks; |
| 1956 | lcounter += delta; |
| 1957 | if (lcounter < 0) { |
| 1958 | ASSERT(0); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1959 | return XFS_ERROR(EINVAL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1960 | } |
| 1961 | mp->m_sb.sb_rblocks = lcounter; |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1962 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1963 | case XFS_SBS_REXTENTS: |
| 1964 | lcounter = (long long)mp->m_sb.sb_rextents; |
| 1965 | lcounter += delta; |
| 1966 | if (lcounter < 0) { |
| 1967 | ASSERT(0); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1968 | return XFS_ERROR(EINVAL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1969 | } |
| 1970 | mp->m_sb.sb_rextents = lcounter; |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1971 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1972 | case XFS_SBS_REXTSLOG: |
| 1973 | scounter = mp->m_sb.sb_rextslog; |
| 1974 | scounter += delta; |
| 1975 | if (scounter < 0) { |
| 1976 | ASSERT(0); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1977 | return XFS_ERROR(EINVAL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1978 | } |
| 1979 | mp->m_sb.sb_rextslog = scounter; |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1980 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1981 | default: |
| 1982 | ASSERT(0); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1983 | return XFS_ERROR(EINVAL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1984 | } |
| 1985 | } |
| 1986 | |
| 1987 | /* |
| 1988 | * xfs_mod_incore_sb() is used to change a field in the in-core |
| 1989 | * superblock structure by the specified delta. This modification |
Eric Sandeen | 3685c2a | 2007-10-11 17:42:32 +1000 | [diff] [blame] | 1990 | * is protected by the m_sb_lock. Just use the xfs_mod_incore_sb_unlocked() |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1991 | * routine to do the work. |
| 1992 | */ |
| 1993 | int |
David Chinner | 20f4ebf | 2007-02-10 18:36:10 +1100 | [diff] [blame] | 1994 | xfs_mod_incore_sb( |
Christoph Hellwig | 96540c7 | 2010-09-30 02:25:55 +0000 | [diff] [blame] | 1995 | struct xfs_mount *mp, |
| 1996 | xfs_sb_field_t field, |
| 1997 | int64_t delta, |
| 1998 | int rsvd) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1999 | { |
Christoph Hellwig | 96540c7 | 2010-09-30 02:25:55 +0000 | [diff] [blame] | 2000 | int status; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2001 | |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2002 | #ifdef HAVE_PERCPU_SB |
Christoph Hellwig | 96540c7 | 2010-09-30 02:25:55 +0000 | [diff] [blame] | 2003 | ASSERT(field < XFS_SBS_ICOUNT || field > XFS_SBS_FDBLOCKS); |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2004 | #endif |
Christoph Hellwig | 96540c7 | 2010-09-30 02:25:55 +0000 | [diff] [blame] | 2005 | spin_lock(&mp->m_sb_lock); |
| 2006 | status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); |
| 2007 | spin_unlock(&mp->m_sb_lock); |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2008 | |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 2009 | return status; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2010 | } |
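/*
 * Example (illustrative sketch, not part of the original source): giving one
 * free realtime extent back through the locked wrapper above.  The calling
 * context and error handling are hypothetical; note that the per-cpu fields
 * (ICOUNT/IFREE/FDBLOCKS) may not be modified this way when HAVE_PERCPU_SB
 * is built in - see the ASSERT in the function above and
 * xfs_icsb_modify_counters():
 *
 *	error = xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, 1, 0);
 *	if (error)
 *		return error;
 */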
| 2011 | |
| 2012 | /* |
Christoph Hellwig | 1b04071 | 2010-09-30 02:25:56 +0000 | [diff] [blame] | 2013 | * Change more than one field in the in-core superblock structure at a time. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2014 | * |
Christoph Hellwig | 1b04071 | 2010-09-30 02:25:56 +0000 | [diff] [blame] | 2015 | * The fields and changes to those fields are specified in the array of |
| 2016 | * xfs_mod_sb structures passed in. Either all of the specified deltas |
| 2017 | * will be applied or none of them will. If any modified field dips below 0, |
| 2018 | * then all modifications will be backed out and EINVAL will be returned. |
| 2019 | * |
| 2020 | * Note that this function may not be used for the superblock values that |
| 2021 | * are tracked with the in-memory per-cpu counters - a direct call to |
| 2022 | * xfs_icsb_modify_counters is required for these. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2023 | */ |
| 2024 | int |
Christoph Hellwig | 1b04071 | 2010-09-30 02:25:56 +0000 | [diff] [blame] | 2025 | xfs_mod_incore_sb_batch( |
| 2026 | struct xfs_mount *mp, |
| 2027 | xfs_mod_sb_t *msb, |
| 2028 | uint nmsb, |
| 2029 | int rsvd) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2030 | { |
David Sterba | 45c51b9 | 2011-04-13 22:03:28 +0000 | [diff] [blame] | 2031 | xfs_mod_sb_t *msbp; |
Christoph Hellwig | 1b04071 | 2010-09-30 02:25:56 +0000 | [diff] [blame] | 2032 | int error = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2033 | |
| 2034 | /* |
Christoph Hellwig | 1b04071 | 2010-09-30 02:25:56 +0000 | [diff] [blame] | 2035 | * Loop through the array of mod structures and apply each individually. |
| 2036 | * If any fail, then back out all those which have already been applied. |
| 2037 | * Do all of this within the scope of the m_sb_lock so that all of the |
| 2038 | * changes will be atomic. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2039 | */ |
Eric Sandeen | 3685c2a | 2007-10-11 17:42:32 +1000 | [diff] [blame] | 2040 | spin_lock(&mp->m_sb_lock); |
David Sterba | 45c51b9 | 2011-04-13 22:03:28 +0000 | [diff] [blame] | 2041 | for (msbp = msb; msbp < (msb + nmsb); msbp++) { |
Christoph Hellwig | 1b04071 | 2010-09-30 02:25:56 +0000 | [diff] [blame] | 2042 | ASSERT(msbp->msb_field < XFS_SBS_ICOUNT || |
| 2043 | msbp->msb_field > XFS_SBS_FDBLOCKS); |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2044 | |
Christoph Hellwig | 1b04071 | 2010-09-30 02:25:56 +0000 | [diff] [blame] | 2045 | error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field, |
| 2046 | msbp->msb_delta, rsvd); |
| 2047 | if (error) |
| 2048 | goto unwind; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2049 | } |
Eric Sandeen | 3685c2a | 2007-10-11 17:42:32 +1000 | [diff] [blame] | 2050 | spin_unlock(&mp->m_sb_lock); |
Christoph Hellwig | 1b04071 | 2010-09-30 02:25:56 +0000 | [diff] [blame] | 2051 | return 0; |
| 2052 | |
| 2053 | unwind: |
| 2054 | while (--msbp >= msb) { |
| 2055 | error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field, |
| 2056 | -msbp->msb_delta, rsvd); |
| 2057 | ASSERT(error == 0); |
| 2058 | } |
| 2059 | spin_unlock(&mp->m_sb_lock); |
| 2060 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2061 | } |
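/*
 * Example (illustrative sketch, not from the original source): applying a
 * growfs-style change to two superblock fields atomically through the batch
 * interface.  The deltas (new_blocks, new_ags) are hypothetical; msb_field
 * and msb_delta are the real xfs_mod_sb_t members used above:
 *
 *	xfs_mod_sb_t	msb[2];
 *
 *	msb[0].msb_field = XFS_SBS_DBLOCKS;
 *	msb[0].msb_delta = new_blocks;
 *	msb[1].msb_field = XFS_SBS_AGCOUNT;
 *	msb[1].msb_delta = new_ags;
 *	error = xfs_mod_incore_sb_batch(mp, msb, 2, 0);
 *
 * Either both deltas are applied, or the first failure backs out the ones
 * already applied and the error is returned.
 */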
| 2062 | |
| 2063 | /* |
| 2064 | * xfs_getsb() is called to obtain the buffer for the superblock. |
| 2065 | * The buffer is returned locked and read in from disk. |
|  2066 |  * The buffer should be released with a call to xfs_buf_relse(). |
| 2067 | * |
|  2068 |  * If the flags parameter is XBF_TRYLOCK, then we'll only return |
| 2069 | * the superblock buffer if it can be locked without sleeping. |
| 2070 | * If it can't then we'll return NULL. |
| 2071 | */ |
Christoph Hellwig | 0c842ad | 2011-07-08 14:36:19 +0200 | [diff] [blame] | 2072 | struct xfs_buf * |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2073 | xfs_getsb( |
Christoph Hellwig | 0c842ad | 2011-07-08 14:36:19 +0200 | [diff] [blame] | 2074 | struct xfs_mount *mp, |
| 2075 | int flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2076 | { |
Christoph Hellwig | 0c842ad | 2011-07-08 14:36:19 +0200 | [diff] [blame] | 2077 | struct xfs_buf *bp = mp->m_sb_bp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2078 | |
Christoph Hellwig | 0c842ad | 2011-07-08 14:36:19 +0200 | [diff] [blame] | 2079 | if (!xfs_buf_trylock(bp)) { |
| 2080 | if (flags & XBF_TRYLOCK) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2081 | return NULL; |
Christoph Hellwig | 0c842ad | 2011-07-08 14:36:19 +0200 | [diff] [blame] | 2082 | xfs_buf_lock(bp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2083 | } |
Christoph Hellwig | 0c842ad | 2011-07-08 14:36:19 +0200 | [diff] [blame] | 2084 | |
Chandra Seetharaman | 72790aa | 2011-07-22 23:40:04 +0000 | [diff] [blame] | 2085 | xfs_buf_hold(bp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2086 | ASSERT(XFS_BUF_ISDONE(bp)); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 2087 | return bp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2088 | } |
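/*
 * Example (illustrative only): grabbing the superblock buffer without
 * sleeping and releasing it again.  The EAGAIN fallback is hypothetical;
 * xfs_getsb(), XBF_TRYLOCK and xfs_buf_relse() are the interfaces used in
 * this file:
 *
 *	bp = xfs_getsb(mp, XBF_TRYLOCK);
 *	if (!bp)
 *		return EAGAIN;		(someone else holds the buffer lock)
 *	...
 *	xfs_buf_relse(bp);
 */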
| 2089 | |
| 2090 | /* |
| 2091 | * Used to free the superblock along various error paths. |
| 2092 | */ |
| 2093 | void |
| 2094 | xfs_freesb( |
Dave Chinner | 26af655 | 2010-09-22 10:47:20 +1000 | [diff] [blame] | 2095 | struct xfs_mount *mp) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2096 | { |
Dave Chinner | 26af655 | 2010-09-22 10:47:20 +1000 | [diff] [blame] | 2097 | struct xfs_buf *bp = mp->m_sb_bp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2098 | |
Dave Chinner | 26af655 | 2010-09-22 10:47:20 +1000 | [diff] [blame] | 2099 | xfs_buf_lock(bp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2100 | mp->m_sb_bp = NULL; |
Dave Chinner | 26af655 | 2010-09-22 10:47:20 +1000 | [diff] [blame] | 2101 | xfs_buf_relse(bp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2102 | } |
| 2103 | |
| 2104 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2105 | * Used to log changes to the superblock unit and width fields which could |
Eric Sandeen | e6957ea | 2008-04-10 12:19:34 +1000 | [diff] [blame] | 2106 | * be altered by the mount options, as well as any potential sb_features2 |
| 2107 | * fixup. Only the first superblock is updated. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2108 | */ |
Christoph Hellwig | 7884bc8 | 2009-01-19 02:04:07 +0100 | [diff] [blame] | 2109 | int |
David Chinner | ee1c090 | 2008-03-06 13:45:50 +1100 | [diff] [blame] | 2110 | xfs_mount_log_sb( |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2111 | xfs_mount_t *mp, |
| 2112 | __int64_t fields) |
| 2113 | { |
| 2114 | xfs_trans_t *tp; |
David Chinner | e5720ee | 2008-04-10 12:21:18 +1000 | [diff] [blame] | 2115 | int error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2116 | |
David Chinner | ee1c090 | 2008-03-06 13:45:50 +1100 | [diff] [blame] | 2117 | ASSERT(fields & (XFS_SB_UNIT | XFS_SB_WIDTH | XFS_SB_UUID | |
David Chinner | 4b166de | 2008-05-20 11:30:27 +1000 | [diff] [blame] | 2118 | XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2 | |
| 2119 | XFS_SB_VERSIONNUM)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2120 | |
| 2121 | tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT); |
Jeff Liu | 5166ab0 | 2013-01-28 21:27:39 +0800 | [diff] [blame] | 2122 | error = xfs_trans_reserve(tp, 0, XFS_SB_LOG_RES(mp), 0, 0, |
| 2123 | XFS_DEFAULT_LOG_COUNT); |
David Chinner | e5720ee | 2008-04-10 12:21:18 +1000 | [diff] [blame] | 2124 | if (error) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2125 | xfs_trans_cancel(tp, 0); |
David Chinner | e5720ee | 2008-04-10 12:21:18 +1000 | [diff] [blame] | 2126 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2127 | } |
| 2128 | xfs_mod_sb(tp, fields); |
David Chinner | e5720ee | 2008-04-10 12:21:18 +1000 | [diff] [blame] | 2129 | error = xfs_trans_commit(tp, 0); |
| 2130 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2131 | } |
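/*
 * Example (illustrative only): a mount-time caller logging a change to the
 * stripe unit/width fields.  The surrounding context is hypothetical; the
 * field mask is one of those permitted by the ASSERT above:
 *
 *	error = xfs_mount_log_sb(mp, XFS_SB_UNIT | XFS_SB_WIDTH);
 *	if (error)
 *		return error;
 */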
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2132 | |
Christoph Hellwig | dda35b8 | 2010-02-15 09:44:46 +0000 | [diff] [blame] | 2133 | /* |
| 2134 | * If the underlying (data/log/rt) device is readonly, there are some |
| 2135 | * operations that cannot proceed. |
| 2136 | */ |
| 2137 | int |
| 2138 | xfs_dev_is_read_only( |
| 2139 | struct xfs_mount *mp, |
| 2140 | char *message) |
| 2141 | { |
| 2142 | if (xfs_readonly_buftarg(mp->m_ddev_targp) || |
| 2143 | xfs_readonly_buftarg(mp->m_logdev_targp) || |
| 2144 | (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) { |
Dave Chinner | 0b932cc | 2011-03-07 10:08:35 +1100 | [diff] [blame] | 2145 | xfs_notice(mp, "%s required on read-only device.", message); |
| 2146 | xfs_notice(mp, "write access unavailable, cannot proceed."); |
Christoph Hellwig | dda35b8 | 2010-02-15 09:44:46 +0000 | [diff] [blame] | 2147 | return EROFS; |
| 2148 | } |
| 2149 | return 0; |
| 2150 | } |
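/*
 * Example (illustrative only): guarding a write-type operation.  The
 * operation name is made up; the pattern is to fail before dirtying
 * anything:
 *
 *	error = xfs_dev_is_read_only(mp, "example operation");
 *	if (error)
 *		return error;		(EROFS)
 */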
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2151 | |
| 2152 | #ifdef HAVE_PERCPU_SB |
| 2153 | /* |
| 2154 | * Per-cpu incore superblock counters |
| 2155 | * |
| 2156 | * Simple concept, difficult implementation |
| 2157 | * |
| 2158 | * Basically, replace the incore superblock counters with a distributed per cpu |
| 2159 | * counter for contended fields (e.g. free block count). |
| 2160 | * |
| 2161 | * Difficulties arise in that the incore sb is used for ENOSPC checking, and |
|  2162 |  * hence needs to be accurately read when we are running low on space. To cope, |
| 2163 | * there is a method to enable and disable the per-cpu counters based on how |
| 2164 | * much "stuff" is available in them. |
| 2165 | * |
| 2166 | * Basically, a counter is enabled if there is enough free resource to justify |
| 2167 | * running a per-cpu fast-path. If the per-cpu counter runs out (i.e. a local |
| 2168 | * ENOSPC), then we disable the counters to synchronise all callers and |
| 2169 | * re-distribute the available resources. |
| 2170 | * |
| 2171 | * If, once we redistributed the available resources, we still get a failure, |
| 2172 | * we disable the per-cpu counter and go through the slow path. |
| 2173 | * |
| 2174 | * The slow path is the current xfs_mod_incore_sb() function. This means that |
Malcolm Parsons | 9da096f | 2009-03-29 09:55:42 +0200 | [diff] [blame] | 2175 | * when we disable a per-cpu counter, we need to drain its resources back to |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2176 | * the global superblock. We do this after disabling the counter to prevent |
| 2177 | * more threads from queueing up on the counter. |
| 2178 | * |
| 2179 | * Essentially, this means that we still need a lock in the fast path to enable |
| 2180 | * synchronisation between the global counters and the per-cpu counters. This |
| 2181 | * is not a problem because the lock will be local to a CPU almost all the time |
| 2182 | * and have little contention except when we get to ENOSPC conditions. |
| 2183 | * |
| 2184 | * Basically, this lock becomes a barrier that enables us to lock out the fast |
| 2185 | * path while we do things like enabling and disabling counters and |
| 2186 | * synchronising the counters. |
| 2187 | * |
| 2188 | * Locking rules: |
| 2189 | * |
Eric Sandeen | 3685c2a | 2007-10-11 17:42:32 +1000 | [diff] [blame] | 2190 | * 1. m_sb_lock before picking up per-cpu locks |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2191 | * 2. per-cpu locks always picked up via for_each_online_cpu() order |
Eric Sandeen | 3685c2a | 2007-10-11 17:42:32 +1000 | [diff] [blame] | 2192 | * 3. accurate counter sync requires m_sb_lock + per cpu locks |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2193 | * 4. modifying per-cpu counters requires holding per-cpu lock |
Eric Sandeen | 3685c2a | 2007-10-11 17:42:32 +1000 | [diff] [blame] | 2194 | * 5. modifying global counters requires holding m_sb_lock |
| 2195 | * 6. enabling or disabling a counter requires holding the m_sb_lock |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2196 | * and _none_ of the per-cpu locks. |
| 2197 | * |
| 2198 | * Disabled counters are only ever re-enabled by a balance operation |
| 2199 | * that results in more free resources per CPU than a given threshold. |
| 2200 | * To ensure counters don't remain disabled, they are rebalanced when |
| 2201 | * the global resource goes above a higher threshold (i.e. some hysteresis |
| 2202 | * is present to prevent thrashing). |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2203 | */ |
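/*
 * Illustrative sketch of the locking rules above (not code from the original
 * file): an accurate fold of the per-cpu counters back into the global
 * superblock nests the locks in this order, mirroring the CPU_DEAD handling
 * below:
 *
 *	xfs_icsb_lock(mp);			counter enable/disable barrier
 *	spin_lock(&mp->m_sb_lock);		rule 1: m_sb_lock first ...
 *	xfs_icsb_lock_all_counters(mp);		rule 2: ... then per-cpu locks,
 *						in for_each_online_cpu() order
 *	... fold per-cpu values into mp->m_sb ...
 *	xfs_icsb_unlock_all_counters(mp);
 *	spin_unlock(&mp->m_sb_lock);
 *	xfs_icsb_unlock(mp);
 */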
David Chinner | e8234a6 | 2006-03-14 13:23:52 +1100 | [diff] [blame] | 2204 | |
Chandra Seetharaman | 5a67e4c | 2006-06-27 02:54:11 -0700 | [diff] [blame] | 2205 | #ifdef CONFIG_HOTPLUG_CPU |
David Chinner | e8234a6 | 2006-03-14 13:23:52 +1100 | [diff] [blame] | 2206 | /* |
| 2207 | * hot-plug CPU notifier support. |
| 2208 | * |
Chandra Seetharaman | 5a67e4c | 2006-06-27 02:54:11 -0700 | [diff] [blame] | 2209 | * We need a notifier per filesystem as we need to be able to identify |
| 2210 | * the filesystem to balance the counters out. This is achieved by |
| 2211 | * having a notifier block embedded in the xfs_mount_t and doing pointer |
| 2212 | * magic to get the mount pointer from the notifier block address. |
David Chinner | e8234a6 | 2006-03-14 13:23:52 +1100 | [diff] [blame] | 2213 | */ |
| 2214 | STATIC int |
| 2215 | xfs_icsb_cpu_notify( |
| 2216 | struct notifier_block *nfb, |
| 2217 | unsigned long action, |
| 2218 | void *hcpu) |
| 2219 | { |
| 2220 | xfs_icsb_cnts_t *cntp; |
| 2221 | xfs_mount_t *mp; |
David Chinner | e8234a6 | 2006-03-14 13:23:52 +1100 | [diff] [blame] | 2222 | |
| 2223 | mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier); |
| 2224 | cntp = (xfs_icsb_cnts_t *) |
| 2225 | per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu); |
| 2226 | switch (action) { |
| 2227 | case CPU_UP_PREPARE: |
Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 2228 | case CPU_UP_PREPARE_FROZEN: |
David Chinner | e8234a6 | 2006-03-14 13:23:52 +1100 | [diff] [blame] | 2229 | /* Easy Case - initialize the area and locks, and |
| 2230 | * then rebalance when online does everything else for us. */ |
David Chinner | 01e1b69 | 2006-03-14 13:29:16 +1100 | [diff] [blame] | 2231 | memset(cntp, 0, sizeof(xfs_icsb_cnts_t)); |
David Chinner | e8234a6 | 2006-03-14 13:23:52 +1100 | [diff] [blame] | 2232 | break; |
| 2233 | case CPU_ONLINE: |
Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 2234 | case CPU_ONLINE_FROZEN: |
David Chinner | 03135cf | 2007-02-10 18:35:15 +1100 | [diff] [blame] | 2235 | xfs_icsb_lock(mp); |
Christoph Hellwig | 45af6c6 | 2008-04-22 17:34:44 +1000 | [diff] [blame] | 2236 | xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0); |
| 2237 | xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0); |
| 2238 | xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0); |
David Chinner | 03135cf | 2007-02-10 18:35:15 +1100 | [diff] [blame] | 2239 | xfs_icsb_unlock(mp); |
David Chinner | e8234a6 | 2006-03-14 13:23:52 +1100 | [diff] [blame] | 2240 | break; |
| 2241 | case CPU_DEAD: |
Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 2242 | case CPU_DEAD_FROZEN: |
David Chinner | e8234a6 | 2006-03-14 13:23:52 +1100 | [diff] [blame] | 2243 | /* Disable all the counters, then fold the dead cpu's |
| 2244 | * count into the total on the global superblock and |
| 2245 | * re-enable the counters. */ |
David Chinner | 03135cf | 2007-02-10 18:35:15 +1100 | [diff] [blame] | 2246 | xfs_icsb_lock(mp); |
Eric Sandeen | 3685c2a | 2007-10-11 17:42:32 +1000 | [diff] [blame] | 2247 | spin_lock(&mp->m_sb_lock); |
David Chinner | e8234a6 | 2006-03-14 13:23:52 +1100 | [diff] [blame] | 2248 | xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT); |
| 2249 | xfs_icsb_disable_counter(mp, XFS_SBS_IFREE); |
| 2250 | xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS); |
| 2251 | |
| 2252 | mp->m_sb.sb_icount += cntp->icsb_icount; |
| 2253 | mp->m_sb.sb_ifree += cntp->icsb_ifree; |
| 2254 | mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks; |
| 2255 | |
David Chinner | 01e1b69 | 2006-03-14 13:29:16 +1100 | [diff] [blame] | 2256 | memset(cntp, 0, sizeof(xfs_icsb_cnts_t)); |
David Chinner | e8234a6 | 2006-03-14 13:23:52 +1100 | [diff] [blame] | 2257 | |
Christoph Hellwig | 45af6c6 | 2008-04-22 17:34:44 +1000 | [diff] [blame] | 2258 | xfs_icsb_balance_counter_locked(mp, XFS_SBS_ICOUNT, 0); |
| 2259 | xfs_icsb_balance_counter_locked(mp, XFS_SBS_IFREE, 0); |
| 2260 | xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0); |
Eric Sandeen | 3685c2a | 2007-10-11 17:42:32 +1000 | [diff] [blame] | 2261 | spin_unlock(&mp->m_sb_lock); |
David Chinner | 03135cf | 2007-02-10 18:35:15 +1100 | [diff] [blame] | 2262 | xfs_icsb_unlock(mp); |
David Chinner | e8234a6 | 2006-03-14 13:23:52 +1100 | [diff] [blame] | 2263 | break; |
| 2264 | } |
| 2265 | |
| 2266 | return NOTIFY_OK; |
| 2267 | } |
Chandra Seetharaman | 5a67e4c | 2006-06-27 02:54:11 -0700 | [diff] [blame] | 2268 | #endif /* CONFIG_HOTPLUG_CPU */ |
David Chinner | e8234a6 | 2006-03-14 13:23:52 +1100 | [diff] [blame] | 2269 | |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2270 | int |
| 2271 | xfs_icsb_init_counters( |
| 2272 | xfs_mount_t *mp) |
| 2273 | { |
| 2274 | xfs_icsb_cnts_t *cntp; |
| 2275 | int i; |
| 2276 | |
| 2277 | mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t); |
| 2278 | if (mp->m_sb_cnts == NULL) |
| 2279 | return -ENOMEM; |
| 2280 | |
Chandra Seetharaman | 5a67e4c | 2006-06-27 02:54:11 -0700 | [diff] [blame] | 2281 | #ifdef CONFIG_HOTPLUG_CPU |
David Chinner | e8234a6 | 2006-03-14 13:23:52 +1100 | [diff] [blame] | 2282 | mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify; |
| 2283 | mp->m_icsb_notifier.priority = 0; |
Chandra Seetharaman | 5a67e4c | 2006-06-27 02:54:11 -0700 | [diff] [blame] | 2284 | register_hotcpu_notifier(&mp->m_icsb_notifier); |
| 2285 | #endif /* CONFIG_HOTPLUG_CPU */ |
David Chinner | e8234a6 | 2006-03-14 13:23:52 +1100 | [diff] [blame] | 2286 | |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2287 | for_each_online_cpu(i) { |
| 2288 | cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); |
David Chinner | 01e1b69 | 2006-03-14 13:29:16 +1100 | [diff] [blame] | 2289 | memset(cntp, 0, sizeof(xfs_icsb_cnts_t)); |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2290 | } |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2291 | |
| 2292 | mutex_init(&mp->m_icsb_mutex); |
| 2293 | |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2294 | /* |
| 2295 | * start with all counters disabled so that the |
| 2296 | * initial balance kicks us off correctly |
| 2297 | */ |
| 2298 | mp->m_icsb_counters = -1; |
| 2299 | return 0; |
| 2300 | } |
| 2301 | |
Lachlan McIlroy | 5478eea | 2007-02-10 18:36:29 +1100 | [diff] [blame] | 2302 | void |
| 2303 | xfs_icsb_reinit_counters( |
| 2304 | xfs_mount_t *mp) |
| 2305 | { |
| 2306 | xfs_icsb_lock(mp); |
| 2307 | /* |
| 2308 | * start with all counters disabled so that the |
| 2309 | * initial balance kicks us off correctly |
| 2310 | */ |
| 2311 | mp->m_icsb_counters = -1; |
Christoph Hellwig | 45af6c6 | 2008-04-22 17:34:44 +1000 | [diff] [blame] | 2312 | xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0); |
| 2313 | xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0); |
| 2314 | xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0); |
Lachlan McIlroy | 5478eea | 2007-02-10 18:36:29 +1100 | [diff] [blame] | 2315 | xfs_icsb_unlock(mp); |
| 2316 | } |
| 2317 | |
Christoph Hellwig | c962fb7 | 2008-05-20 15:10:52 +1000 | [diff] [blame] | 2318 | void |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2319 | xfs_icsb_destroy_counters( |
| 2320 | xfs_mount_t *mp) |
| 2321 | { |
David Chinner | e8234a6 | 2006-03-14 13:23:52 +1100 | [diff] [blame] | 2322 | if (mp->m_sb_cnts) { |
Chandra Seetharaman | 5a67e4c | 2006-06-27 02:54:11 -0700 | [diff] [blame] | 2323 | unregister_hotcpu_notifier(&mp->m_icsb_notifier); |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2324 | free_percpu(mp->m_sb_cnts); |
David Chinner | e8234a6 | 2006-03-14 13:23:52 +1100 | [diff] [blame] | 2325 | } |
David Chinner | 03135cf | 2007-02-10 18:35:15 +1100 | [diff] [blame] | 2326 | mutex_destroy(&mp->m_icsb_mutex); |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2327 | } |
| 2328 | |
Christoph Hellwig | b8f82a4 | 2009-11-14 16:17:22 +0000 | [diff] [blame] | 2329 | STATIC void |
David Chinner | 01e1b69 | 2006-03-14 13:29:16 +1100 | [diff] [blame] | 2330 | xfs_icsb_lock_cntr( |
| 2331 | xfs_icsb_cnts_t *icsbp) |
| 2332 | { |
| 2333 | while (test_and_set_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags)) { |
| 2334 | ndelay(1000); |
| 2335 | } |
| 2336 | } |
| 2337 | |
Christoph Hellwig | b8f82a4 | 2009-11-14 16:17:22 +0000 | [diff] [blame] | 2338 | STATIC void |
David Chinner | 01e1b69 | 2006-03-14 13:29:16 +1100 | [diff] [blame] | 2339 | xfs_icsb_unlock_cntr( |
| 2340 | xfs_icsb_cnts_t *icsbp) |
| 2341 | { |
| 2342 | clear_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags); |
| 2343 | } |
| 2344 | |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2345 | |
Christoph Hellwig | b8f82a4 | 2009-11-14 16:17:22 +0000 | [diff] [blame] | 2346 | STATIC void |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2347 | xfs_icsb_lock_all_counters( |
| 2348 | xfs_mount_t *mp) |
| 2349 | { |
| 2350 | xfs_icsb_cnts_t *cntp; |
| 2351 | int i; |
| 2352 | |
| 2353 | for_each_online_cpu(i) { |
| 2354 | cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); |
David Chinner | 01e1b69 | 2006-03-14 13:29:16 +1100 | [diff] [blame] | 2355 | xfs_icsb_lock_cntr(cntp); |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2356 | } |
| 2357 | } |
| 2358 | |
Christoph Hellwig | b8f82a4 | 2009-11-14 16:17:22 +0000 | [diff] [blame] | 2359 | STATIC void |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2360 | xfs_icsb_unlock_all_counters( |
| 2361 | xfs_mount_t *mp) |
| 2362 | { |
| 2363 | xfs_icsb_cnts_t *cntp; |
| 2364 | int i; |
| 2365 | |
| 2366 | for_each_online_cpu(i) { |
| 2367 | cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); |
David Chinner | 01e1b69 | 2006-03-14 13:29:16 +1100 | [diff] [blame] | 2368 | xfs_icsb_unlock_cntr(cntp); |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2369 | } |
| 2370 | } |
| 2371 | |
| 2372 | STATIC void |
| 2373 | xfs_icsb_count( |
| 2374 | xfs_mount_t *mp, |
| 2375 | xfs_icsb_cnts_t *cnt, |
| 2376 | int flags) |
| 2377 | { |
| 2378 | xfs_icsb_cnts_t *cntp; |
| 2379 | int i; |
| 2380 | |
| 2381 | memset(cnt, 0, sizeof(xfs_icsb_cnts_t)); |
| 2382 | |
| 2383 | if (!(flags & XFS_ICSB_LAZY_COUNT)) |
| 2384 | xfs_icsb_lock_all_counters(mp); |
| 2385 | |
| 2386 | for_each_online_cpu(i) { |
| 2387 | cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); |
| 2388 | cnt->icsb_icount += cntp->icsb_icount; |
| 2389 | cnt->icsb_ifree += cntp->icsb_ifree; |
| 2390 | cnt->icsb_fdblocks += cntp->icsb_fdblocks; |
| 2391 | } |
| 2392 | |
| 2393 | if (!(flags & XFS_ICSB_LAZY_COUNT)) |
| 2394 | xfs_icsb_unlock_all_counters(mp); |
| 2395 | } |
| 2396 | |
| 2397 | STATIC int |
| 2398 | xfs_icsb_counter_disabled( |
| 2399 | xfs_mount_t *mp, |
| 2400 | xfs_sb_field_t field) |
| 2401 | { |
| 2402 | ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS)); |
| 2403 | return test_bit(field, &mp->m_icsb_counters); |
| 2404 | } |
| 2405 | |
David Chinner | 36fbe6e | 2008-04-10 12:19:56 +1000 | [diff] [blame] | 2406 | STATIC void |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2407 | xfs_icsb_disable_counter( |
| 2408 | xfs_mount_t *mp, |
| 2409 | xfs_sb_field_t field) |
| 2410 | { |
| 2411 | xfs_icsb_cnts_t cnt; |
| 2412 | |
| 2413 | ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS)); |
| 2414 | |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2415 | /* |
| 2416 | * If we are already disabled, then there is nothing to do |
| 2417 | * here. We check before locking all the counters to avoid |
| 2418 | * the expensive lock operation when being called in the |
| 2419 | * slow path and the counter is already disabled. This is |
| 2420 | * safe because the only time we set or clear this state is under |
| 2421 | * the m_icsb_mutex. |
| 2422 | */ |
| 2423 | if (xfs_icsb_counter_disabled(mp, field)) |
David Chinner | 36fbe6e | 2008-04-10 12:19:56 +1000 | [diff] [blame] | 2424 | return; |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2425 | |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2426 | xfs_icsb_lock_all_counters(mp); |
| 2427 | if (!test_and_set_bit(field, &mp->m_icsb_counters)) { |
| 2428 | /* drain back to superblock */ |
| 2429 | |
Christoph Hellwig | ce46193 | 2008-04-22 17:34:50 +1000 | [diff] [blame] | 2430 | xfs_icsb_count(mp, &cnt, XFS_ICSB_LAZY_COUNT); |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2431 | switch(field) { |
| 2432 | case XFS_SBS_ICOUNT: |
| 2433 | mp->m_sb.sb_icount = cnt.icsb_icount; |
| 2434 | break; |
| 2435 | case XFS_SBS_IFREE: |
| 2436 | mp->m_sb.sb_ifree = cnt.icsb_ifree; |
| 2437 | break; |
| 2438 | case XFS_SBS_FDBLOCKS: |
| 2439 | mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks; |
| 2440 | break; |
| 2441 | default: |
| 2442 | BUG(); |
| 2443 | } |
| 2444 | } |
| 2445 | |
| 2446 | xfs_icsb_unlock_all_counters(mp); |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2447 | } |
| 2448 | |
| 2449 | STATIC void |
| 2450 | xfs_icsb_enable_counter( |
| 2451 | xfs_mount_t *mp, |
| 2452 | xfs_sb_field_t field, |
| 2453 | uint64_t count, |
| 2454 | uint64_t resid) |
| 2455 | { |
| 2456 | xfs_icsb_cnts_t *cntp; |
| 2457 | int i; |
| 2458 | |
| 2459 | ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS)); |
| 2460 | |
| 2461 | xfs_icsb_lock_all_counters(mp); |
| 2462 | for_each_online_cpu(i) { |
| 2463 | cntp = per_cpu_ptr(mp->m_sb_cnts, i); |
| 2464 | switch (field) { |
| 2465 | case XFS_SBS_ICOUNT: |
| 2466 | cntp->icsb_icount = count + resid; |
| 2467 | break; |
| 2468 | case XFS_SBS_IFREE: |
| 2469 | cntp->icsb_ifree = count + resid; |
| 2470 | break; |
| 2471 | case XFS_SBS_FDBLOCKS: |
| 2472 | cntp->icsb_fdblocks = count + resid; |
| 2473 | break; |
| 2474 | default: |
| 2475 | BUG(); |
| 2476 | break; |
| 2477 | } |
| 2478 | resid = 0; |
| 2479 | } |
| 2480 | clear_bit(field, &mp->m_icsb_counters); |
| 2481 | xfs_icsb_unlock_all_counters(mp); |
| 2482 | } |
| 2483 | |
David Chinner | dbcabad | 2007-02-10 18:36:17 +1100 | [diff] [blame] | 2484 | void |
Christoph Hellwig | d4d90b5 | 2008-04-22 17:34:37 +1000 | [diff] [blame] | 2485 | xfs_icsb_sync_counters_locked( |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2486 | xfs_mount_t *mp, |
| 2487 | int flags) |
| 2488 | { |
| 2489 | xfs_icsb_cnts_t cnt; |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2490 | |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2491 | xfs_icsb_count(mp, &cnt, flags); |
| 2492 | |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2493 | if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT)) |
| 2494 | mp->m_sb.sb_icount = cnt.icsb_icount; |
| 2495 | if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE)) |
| 2496 | mp->m_sb.sb_ifree = cnt.icsb_ifree; |
| 2497 | if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS)) |
| 2498 | mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks; |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2499 | } |
| 2500 | |
| 2501 | /* |
| 2502 | * Accurate update of per-cpu counters to incore superblock |
| 2503 | */ |
Christoph Hellwig | d4d90b5 | 2008-04-22 17:34:37 +1000 | [diff] [blame] | 2504 | void |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2505 | xfs_icsb_sync_counters( |
Christoph Hellwig | d4d90b5 | 2008-04-22 17:34:37 +1000 | [diff] [blame] | 2506 | xfs_mount_t *mp, |
| 2507 | int flags) |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2508 | { |
Christoph Hellwig | d4d90b5 | 2008-04-22 17:34:37 +1000 | [diff] [blame] | 2509 | spin_lock(&mp->m_sb_lock); |
| 2510 | xfs_icsb_sync_counters_locked(mp, flags); |
| 2511 | spin_unlock(&mp->m_sb_lock); |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2512 | } |
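/*
 * Example (illustrative only): a caller that needs the global superblock
 * counters to be accurate - say, before writing the superblock to disk -
 * would sync the per-cpu counters first.  The calling context is
 * hypothetical:
 *
 *	xfs_icsb_sync_counters(mp, 0);
 *
 * A caller already holding m_sb_lock uses xfs_icsb_sync_counters_locked()
 * instead; passing XFS_ICSB_LAZY_COUNT skips the per-cpu locks and yields
 * an approximate count.
 */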
| 2513 | |
| 2514 | /* |
| 2515 | * Balance and enable/disable counters as necessary. |
| 2516 | * |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2517 | * Thresholds for re-enabling counters are somewhat magic. inode counts are |
|  2518 |  * chosen to be the same number as a single on-disk allocation chunk per CPU, and |
|  2519 |  * free blocks is something far enough from zero that we aren't going to thrash when we |
| 2520 | * get near ENOSPC. We also need to supply a minimum we require per cpu to |
| 2521 | * prevent looping endlessly when xfs_alloc_space asks for more than will |
| 2522 | * be distributed to a single CPU but each CPU has enough blocks to be |
| 2523 | * reenabled. |
| 2524 | * |
| 2525 | * Note that we can be called when counters are already disabled. |
| 2526 | * xfs_icsb_disable_counter() optimises the counter locking in this case to |
| 2527 | * prevent locking every per-cpu counter needlessly. |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2528 | */ |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2529 | |
| 2530 | #define XFS_ICSB_INO_CNTR_REENABLE (uint64_t)64 |
David Chinner | 4be536d | 2006-09-07 14:26:50 +1000 | [diff] [blame] | 2531 | #define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \ |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2532 | (uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp)) |
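/*
 * Worked example (illustrative numbers only): with 8 online CPUs and
 * sb_fdblocks = 80000, xfs_icsb_balance_counter_locked() below computes
 * count = 10000 blocks per CPU and resid = 0.  The free-block counter is
 * re-enabled because 10000 >= max(min_per_cpu, 512 + XFS_ALLOC_SET_ASIDE(mp));
 * if the per-CPU share fell below that threshold the counter would stay
 * disabled and callers would fall back to the locked slow path.
 */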
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2533 | STATIC void |
Christoph Hellwig | 45af6c6 | 2008-04-22 17:34:44 +1000 | [diff] [blame] | 2534 | xfs_icsb_balance_counter_locked( |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2535 | xfs_mount_t *mp, |
| 2536 | xfs_sb_field_t field, |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2537 | int min_per_cpu) |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2538 | { |
Nathan Scott | 6fdf8cc | 2006-06-28 10:13:52 +1000 | [diff] [blame] | 2539 | uint64_t count, resid; |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2540 | int weight = num_online_cpus(); |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2541 | uint64_t min = (uint64_t)min_per_cpu; |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2542 | |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2543 | /* disable counter and sync counter */ |
| 2544 | xfs_icsb_disable_counter(mp, field); |
| 2545 | |
|  2546 | 	/* update counters - first CPU gets residual */ |
| 2547 | switch (field) { |
| 2548 | case XFS_SBS_ICOUNT: |
| 2549 | count = mp->m_sb.sb_icount; |
| 2550 | resid = do_div(count, weight); |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2551 | if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE)) |
Christoph Hellwig | 45af6c6 | 2008-04-22 17:34:44 +1000 | [diff] [blame] | 2552 | return; |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2553 | break; |
| 2554 | case XFS_SBS_IFREE: |
| 2555 | count = mp->m_sb.sb_ifree; |
| 2556 | resid = do_div(count, weight); |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2557 | if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE)) |
Christoph Hellwig | 45af6c6 | 2008-04-22 17:34:44 +1000 | [diff] [blame] | 2558 | return; |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2559 | break; |
| 2560 | case XFS_SBS_FDBLOCKS: |
| 2561 | count = mp->m_sb.sb_fdblocks; |
| 2562 | resid = do_div(count, weight); |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2563 | if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp))) |
Christoph Hellwig | 45af6c6 | 2008-04-22 17:34:44 +1000 | [diff] [blame] | 2564 | return; |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2565 | break; |
| 2566 | default: |
| 2567 | BUG(); |
Nathan Scott | 6fdf8cc | 2006-06-28 10:13:52 +1000 | [diff] [blame] | 2568 | count = resid = 0; /* quiet, gcc */ |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2569 | break; |
| 2570 | } |
| 2571 | |
| 2572 | xfs_icsb_enable_counter(mp, field, count, resid); |
Christoph Hellwig | 45af6c6 | 2008-04-22 17:34:44 +1000 | [diff] [blame] | 2573 | } |
| 2574 | |
| 2575 | STATIC void |
| 2576 | xfs_icsb_balance_counter( |
| 2577 | xfs_mount_t *mp, |
| 2578 | xfs_sb_field_t fields, |
| 2579 | int min_per_cpu) |
| 2580 | { |
| 2581 | spin_lock(&mp->m_sb_lock); |
| 2582 | xfs_icsb_balance_counter_locked(mp, fields, min_per_cpu); |
| 2583 | spin_unlock(&mp->m_sb_lock); |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2584 | } |
| 2585 | |
Christoph Hellwig | 1b04071 | 2010-09-30 02:25:56 +0000 | [diff] [blame] | 2586 | int |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2587 | xfs_icsb_modify_counters( |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2588 | xfs_mount_t *mp, |
| 2589 | xfs_sb_field_t field, |
David Chinner | 20f4ebf | 2007-02-10 18:36:10 +1100 | [diff] [blame] | 2590 | int64_t delta, |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2591 | int rsvd) |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2592 | { |
| 2593 | xfs_icsb_cnts_t *icsbp; |
| 2594 | long long lcounter; /* long counter for 64 bit fields */ |
Christoph Lameter | 7a9e02d | 2009-10-03 19:48:23 +0900 | [diff] [blame] | 2595 | int ret = 0; |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2596 | |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2597 | might_sleep(); |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2598 | again: |
Christoph Lameter | 7a9e02d | 2009-10-03 19:48:23 +0900 | [diff] [blame] | 2599 | preempt_disable(); |
| 2600 | icsbp = this_cpu_ptr(mp->m_sb_cnts); |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2601 | |
| 2602 | /* |
| 2603 | * if the counter is disabled, go to slow path |
| 2604 | */ |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2605 | if (unlikely(xfs_icsb_counter_disabled(mp, field))) |
| 2606 | goto slow_path; |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2607 | xfs_icsb_lock_cntr(icsbp); |
| 2608 | if (unlikely(xfs_icsb_counter_disabled(mp, field))) { |
| 2609 | xfs_icsb_unlock_cntr(icsbp); |
| 2610 | goto slow_path; |
| 2611 | } |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2612 | |
| 2613 | switch (field) { |
| 2614 | case XFS_SBS_ICOUNT: |
| 2615 | lcounter = icsbp->icsb_icount; |
| 2616 | lcounter += delta; |
| 2617 | if (unlikely(lcounter < 0)) |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2618 | goto balance_counter; |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2619 | icsbp->icsb_icount = lcounter; |
| 2620 | break; |
| 2621 | |
| 2622 | case XFS_SBS_IFREE: |
| 2623 | lcounter = icsbp->icsb_ifree; |
| 2624 | lcounter += delta; |
| 2625 | if (unlikely(lcounter < 0)) |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2626 | goto balance_counter; |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2627 | icsbp->icsb_ifree = lcounter; |
| 2628 | break; |
| 2629 | |
| 2630 | case XFS_SBS_FDBLOCKS: |
| 2631 | BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0); |
| 2632 | |
David Chinner | 4be536d | 2006-09-07 14:26:50 +1000 | [diff] [blame] | 2633 | lcounter = icsbp->icsb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2634 | lcounter += delta; |
| 2635 | if (unlikely(lcounter < 0)) |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2636 | goto balance_counter; |
David Chinner | 4be536d | 2006-09-07 14:26:50 +1000 | [diff] [blame] | 2637 | icsbp->icsb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp); |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2638 | break; |
| 2639 | default: |
| 2640 | BUG(); |
| 2641 | break; |
| 2642 | } |
David Chinner | 01e1b69 | 2006-03-14 13:29:16 +1100 | [diff] [blame] | 2643 | xfs_icsb_unlock_cntr(icsbp); |
Christoph Lameter | 7a9e02d | 2009-10-03 19:48:23 +0900 | [diff] [blame] | 2644 | preempt_enable(); |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2645 | return 0; |
| 2646 | |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2647 | slow_path: |
Christoph Lameter | 7a9e02d | 2009-10-03 19:48:23 +0900 | [diff] [blame] | 2648 | preempt_enable(); |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2649 | |
| 2650 | /* |
| 2651 | * serialise with a mutex so we don't burn lots of cpu on |
| 2652 | * the superblock lock. We still need to hold the superblock |
| 2653 | * lock, however, when we modify the global structures. |
| 2654 | */ |
David Chinner | 03135cf | 2007-02-10 18:35:15 +1100 | [diff] [blame] | 2655 | xfs_icsb_lock(mp); |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2656 | |
| 2657 | /* |
| 2658 | * Now running atomically. |
| 2659 | * |
| 2660 | * If the counter is enabled, someone has beaten us to rebalancing. |
| 2661 | * Drop the lock and try again in the fast path.... |
| 2662 | */ |
| 2663 | if (!(xfs_icsb_counter_disabled(mp, field))) { |
David Chinner | 03135cf | 2007-02-10 18:35:15 +1100 | [diff] [blame] | 2664 | xfs_icsb_unlock(mp); |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2665 | goto again; |
| 2666 | } |
| 2667 | |
| 2668 | /* |
| 2669 | * The counter is currently disabled. Because we are |
| 2670 | * running atomically here, we know a rebalance cannot |
| 2671 | * be in progress. Hence we can go straight to operating |
| 2672 | * on the global superblock. We do not call xfs_mod_incore_sb() |
Eric Sandeen | 3685c2a | 2007-10-11 17:42:32 +1000 | [diff] [blame] | 2673 | * here even though we need to get the m_sb_lock. Doing so |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2674 | * will cause us to re-enter this function and deadlock. |
Eric Sandeen | 3685c2a | 2007-10-11 17:42:32 +1000 | [diff] [blame] | 2675 | * Hence we get the m_sb_lock ourselves and then call |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2676 | * xfs_mod_incore_sb_unlocked() as the unlocked path operates |
| 2677 | * directly on the global counters. |
| 2678 | */ |
Eric Sandeen | 3685c2a | 2007-10-11 17:42:32 +1000 | [diff] [blame] | 2679 | spin_lock(&mp->m_sb_lock); |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2680 | ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); |
Eric Sandeen | 3685c2a | 2007-10-11 17:42:32 +1000 | [diff] [blame] | 2681 | spin_unlock(&mp->m_sb_lock); |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2682 | |
| 2683 | /* |
| 2684 | * Now that we've modified the global superblock, we |
| 2685 | * may be able to re-enable the distributed counters |
| 2686 | * (e.g. lots of space just got freed). After that |
| 2687 | * we are done. |
| 2688 | */ |
| 2689 | if (ret != ENOSPC) |
Christoph Hellwig | 45af6c6 | 2008-04-22 17:34:44 +1000 | [diff] [blame] | 2690 | xfs_icsb_balance_counter(mp, field, 0); |
David Chinner | 03135cf | 2007-02-10 18:35:15 +1100 | [diff] [blame] | 2691 | xfs_icsb_unlock(mp); |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2692 | return ret; |
| 2693 | |
| 2694 | balance_counter: |
David Chinner | 01e1b69 | 2006-03-14 13:29:16 +1100 | [diff] [blame] | 2695 | xfs_icsb_unlock_cntr(icsbp); |
Christoph Lameter | 7a9e02d | 2009-10-03 19:48:23 +0900 | [diff] [blame] | 2696 | preempt_enable(); |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2697 | |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2698 | /* |
| 2699 | * We may have multiple threads here if multiple per-cpu |
| 2700 | * counters run dry at the same time. This will mean we can |
| 2701 | * do more balances than strictly necessary but it is not |
| 2702 | * the common slowpath case. |
| 2703 | */ |
David Chinner | 03135cf | 2007-02-10 18:35:15 +1100 | [diff] [blame] | 2704 | xfs_icsb_lock(mp); |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2705 | |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2706 | /* |
| 2707 | * running atomically. |
| 2708 | * |
| 2709 | * This will leave the counter in the correct state for future |
| 2710 | * accesses. After the rebalance, we simply try again and our retry |
| 2711 | * will either succeed through the fast path or slow path without |
| 2712 | * another balance operation being required. |
| 2713 | */ |
Christoph Hellwig | 45af6c6 | 2008-04-22 17:34:44 +1000 | [diff] [blame] | 2714 | xfs_icsb_balance_counter(mp, field, delta); |
David Chinner | 03135cf | 2007-02-10 18:35:15 +1100 | [diff] [blame] | 2715 | xfs_icsb_unlock(mp); |
David Chinner | 20b6428 | 2007-02-10 18:35:09 +1100 | [diff] [blame] | 2716 | goto again; |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2717 | } |
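/*
 * Example (illustrative only): the per-cpu superblock fields must be
 * modified through this function rather than xfs_mod_incore_sb().  Taking
 * away a hypothetical number of data blocks, dipping into the reserve pool
 * if rsvd is set:
 *
 *	error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
 *					 -(int64_t)nblocks, rsvd);
 *	if (error == ENOSPC)
 *		... out of space, even after a counter rebalance ...
 */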
| 2718 | |
David Chinner | 8d280b9 | 2006-03-14 13:13:09 +1100 | [diff] [blame] | 2719 | #endif |