/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_trace.h"

/*
 * LOCK ORDER
 *
 * inode lock		    (ilock)
 * dquot hash-chain lock    (hashlock)
 * xqm dquot freelist lock  (freelistlock)
 * mount's dquot list lock  (mplistlock)
 * user dquot lock - lock ordering among dquots is based on the uid or gid
 * group dquot lock - similar to udquots. Between the two dquots, the udquot
 *		      has to be locked first.
 * pin lock - the dquot lock must be held to take this lock.
 * flush lock - ditto.
 */

#ifdef DEBUG
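/*
 * Error-injection knobs (debug builds only): when xfs_do_dqerror is set,
 * every xfs_dqerror_mod-th call to xfs_qm_dqget() against the buftarg in
 * xfs_dqerror_target fails with EIO; see the check in xfs_qm_dqget().
 */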
xfs_buftarg_t	*xfs_dqerror_target;
int		xfs_do_dqerror;
int		xfs_dqreq_num;
int		xfs_dqerror_mod = 33;
#endif

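/*
 * Separate lockdep class for group/project dquot locks: a user dquot and a
 * non-user dquot may be held at the same time, and lockdep would otherwise
 * see that as recursive locking within a single class.
 */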
static struct lock_class_key xfs_dquot_other_class;

/*
 * Allocate and initialize a dquot. We don't always allocate fresh memory;
 * we try to reclaim a free dquot if the number of incore dquots is above
 * a threshold.
 * The only field inside the core that gets initialized at this point
 * is the d_id field. The idea is to fill in the entire q_core
 * when we read in the on-disk dquot.
 */
STATIC xfs_dquot_t *
xfs_qm_dqinit(
	xfs_mount_t	*mp,
	xfs_dqid_t	id,
	uint		type)
{
	xfs_dquot_t	*dqp;
	boolean_t	brandnewdquot;

	brandnewdquot = xfs_qm_dqalloc_incore(&dqp);
	dqp->dq_flags = type;
	dqp->q_core.d_id = cpu_to_be32(id);
	dqp->q_mount = mp;

	/*
	 * No need to re-initialize these if this is a reclaimed dquot.
	 */
	if (brandnewdquot) {
		INIT_LIST_HEAD(&dqp->q_freelist);
		mutex_init(&dqp->q_qlock);
		init_waitqueue_head(&dqp->q_pinwait);

		/*
		 * Because we want to use a counting completion, complete
		 * the flush completion once to allow a single access to
		 * the flush completion without blocking.
		 */
		init_completion(&dqp->q_flush);
		complete(&dqp->q_flush);

		trace_xfs_dqinit(dqp);
	} else {
		/*
		 * Only the q_core portion was zeroed in dqreclaim_one(),
		 * so we need to reset the rest.
		 */
		dqp->q_nrefs = 0;
		dqp->q_blkno = 0;
		INIT_LIST_HEAD(&dqp->q_mplist);
		INIT_LIST_HEAD(&dqp->q_hashlist);
		dqp->q_bufoffset = 0;
		dqp->q_fileoffset = 0;
		dqp->q_transp = NULL;
		dqp->q_gdquot = NULL;
		dqp->q_res_bcount = 0;
		dqp->q_res_icount = 0;
		dqp->q_res_rtbcount = 0;
		atomic_set(&dqp->q_pincount, 0);
		dqp->q_hash = NULL;
		ASSERT(list_empty(&dqp->q_freelist));

		trace_xfs_dqreuse(dqp);
	}

	/*
	 * In either case we need to make sure group quotas have a different
	 * lock class than user quotas, to make sure lockdep knows we can
	 * have locks of one of each at the same time.
	 */
	if (!(type & XFS_DQ_USER))
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class);

	/*
	 * log item gets initialized later
	 */
	return (dqp);
}

/*
 * This is called to free all the memory associated with a dquot
 */
void
xfs_qm_dqdestroy(
	xfs_dquot_t	*dqp)
{
	ASSERT(list_empty(&dqp->q_freelist));

	mutex_destroy(&dqp->q_qlock);
	sv_destroy(&dqp->q_pinwait);
	kmem_zone_free(xfs_Gqm->qm_dqzone, dqp);

	atomic_dec(&xfs_Gqm->qm_totaldquots);
}

/*
 * This is what a 'fresh' dquot inside a dquot chunk looks like on disk.
 */
STATIC void
xfs_qm_dqinit_core(
	xfs_dqid_t	id,
	uint		type,
	xfs_dqblk_t	*d)
{
	/*
	 * Caller has zero'd the entire dquot 'chunk' already.
	 */
	d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
	d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
	d->dd_diskdq.d_id = cpu_to_be32(id);
	d->dd_diskdq.d_flags = type;
}

/*
 * If default limits are in force, push them into the dquot now.
 * We overwrite the dquot limits only if they are zero and this
 * is not the root dquot.
 */
void
xfs_qm_adjust_dqlimits(
	xfs_mount_t		*mp,
	xfs_disk_dquot_t	*d)
{
	xfs_quotainfo_t		*q = mp->m_quotainfo;

	ASSERT(d->d_id);

	if (q->qi_bsoftlimit && !d->d_blk_softlimit)
		d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit);
	if (q->qi_bhardlimit && !d->d_blk_hardlimit)
		d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit);
	if (q->qi_isoftlimit && !d->d_ino_softlimit)
		d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit);
	if (q->qi_ihardlimit && !d->d_ino_hardlimit)
		d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit);
	if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit)
		d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit);
	if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit)
		d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit);
}

/*
 * Check the limits and timers of a dquot and start or reset timers
 * if necessary.
 * This gets called even when quota enforcement is OFF, which makes our
 * life a little less complicated: we just don't reject any quota
 * reservations in that case, and we return 0 for the timer values in
 * Q_GETQUOTA calls when enforcement is off.
 * In contrast, warnings are a little different in that they don't
 * 'automatically' get started when limits get exceeded.  They do
 * get reset to zero, however, when we find the count to be under
 * the soft limit (they are only ever set non-zero via userspace).
 */
void
xfs_qm_adjust_dqtimers(
	xfs_mount_t		*mp,
	xfs_disk_dquot_t	*d)
{
	ASSERT(d->d_id);

#ifdef QUOTADEBUG
	if (d->d_blk_hardlimit)
		ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
		       be64_to_cpu(d->d_blk_hardlimit));
	if (d->d_ino_hardlimit)
		ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
		       be64_to_cpu(d->d_ino_hardlimit));
	if (d->d_rtb_hardlimit)
		ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
		       be64_to_cpu(d->d_rtb_hardlimit));
#endif
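
	/*
	 * Blocks, inodes and realtime blocks are handled the same way below:
	 * if no timer is running, start one as soon as the count reaches the
	 * soft or hard limit (leaving any warning count alone), otherwise
	 * clear the warning count; if a timer is already running, stop it
	 * once the count has dropped back below both limits.
	 */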
	if (!d->d_btimer) {
		if ((d->d_blk_softlimit &&
		     (be64_to_cpu(d->d_bcount) >=
		      be64_to_cpu(d->d_blk_softlimit))) ||
		    (d->d_blk_hardlimit &&
		     (be64_to_cpu(d->d_bcount) >=
		      be64_to_cpu(d->d_blk_hardlimit)))) {
			d->d_btimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_btimelimit);
		} else {
			d->d_bwarns = 0;
		}
	} else {
		if ((!d->d_blk_softlimit ||
		     (be64_to_cpu(d->d_bcount) <
		      be64_to_cpu(d->d_blk_softlimit))) &&
		    (!d->d_blk_hardlimit ||
		     (be64_to_cpu(d->d_bcount) <
		      be64_to_cpu(d->d_blk_hardlimit)))) {
			d->d_btimer = 0;
		}
	}

	if (!d->d_itimer) {
		if ((d->d_ino_softlimit &&
		     (be64_to_cpu(d->d_icount) >=
		      be64_to_cpu(d->d_ino_softlimit))) ||
		    (d->d_ino_hardlimit &&
		     (be64_to_cpu(d->d_icount) >=
		      be64_to_cpu(d->d_ino_hardlimit)))) {
			d->d_itimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_itimelimit);
		} else {
			d->d_iwarns = 0;
		}
	} else {
		if ((!d->d_ino_softlimit ||
		     (be64_to_cpu(d->d_icount) <
		      be64_to_cpu(d->d_ino_softlimit))) &&
		    (!d->d_ino_hardlimit ||
		     (be64_to_cpu(d->d_icount) <
		      be64_to_cpu(d->d_ino_hardlimit)))) {
			d->d_itimer = 0;
		}
	}

	if (!d->d_rtbtimer) {
		if ((d->d_rtb_softlimit &&
		     (be64_to_cpu(d->d_rtbcount) >=
		      be64_to_cpu(d->d_rtb_softlimit))) ||
		    (d->d_rtb_hardlimit &&
		     (be64_to_cpu(d->d_rtbcount) >=
		      be64_to_cpu(d->d_rtb_hardlimit)))) {
			d->d_rtbtimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_rtbtimelimit);
		} else {
			d->d_rtbwarns = 0;
		}
	} else {
		if ((!d->d_rtb_softlimit ||
		     (be64_to_cpu(d->d_rtbcount) <
		      be64_to_cpu(d->d_rtb_softlimit))) &&
		    (!d->d_rtb_hardlimit ||
		     (be64_to_cpu(d->d_rtbcount) <
		      be64_to_cpu(d->d_rtb_hardlimit)))) {
			d->d_rtbtimer = 0;
		}
	}
}

/*
 * Initialize a buffer full of dquots and log the whole thing.
 */
STATIC void
xfs_qm_init_dquot_blk(
	xfs_trans_t	*tp,
	xfs_mount_t	*mp,
	xfs_dqid_t	id,
	uint		type,
	xfs_buf_t	*bp)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	xfs_dqblk_t	*d;
	int		curid, i;

	ASSERT(tp);
	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);

	d = (xfs_dqblk_t *)XFS_BUF_PTR(bp);

	/*
	 * ID of the first dquot in the block - IDs are zero based.
	 */
	curid = id - (id % q->qi_dqperchunk);
	ASSERT(curid >= 0);
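	/*
	 * The computation above rounds the id down to the first id stored in
	 * this chunk: with, say, 30 dquots per chunk, id 73 yields curid 60.
	 */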
	memset(d, 0, BBTOB(q->qi_dqchunklen));
	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++)
		xfs_qm_dqinit_core(curid, type, d);
	xfs_trans_dquot_buf(tp, bp,
			    (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
			    ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
			     XFS_BLF_GDQUOT_BUF)));
	xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}

/*
 * Allocate a block and fill it with dquots.
 * This is called when the bmapi finds a hole.
 */
STATIC int
xfs_qm_dqalloc(
	xfs_trans_t	**tpp,
	xfs_mount_t	*mp,
	xfs_dquot_t	*dqp,
	xfs_inode_t	*quotip,
	xfs_fileoff_t	offset_fsb,
	xfs_buf_t	**O_bpp)
{
	xfs_fsblock_t	firstblock;
	xfs_bmap_free_t flist;
	xfs_bmbt_irec_t map;
	int		nmaps, error, committed;
	xfs_buf_t	*bp;
	xfs_trans_t	*tp = *tpp;

	ASSERT(tp != NULL);

	trace_xfs_dqalloc(dqp);

	/*
	 * Initialize the bmap freelist prior to calling bmapi code.
	 */
	xfs_bmap_init(&flist, &firstblock);
	xfs_ilock(quotip, XFS_ILOCK_EXCL);
	/*
	 * Return if this type of quota was turned off while we didn't
	 * have an inode lock.
	 */
	if (XFS_IS_THIS_QUOTA_OFF(dqp)) {
		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
		return (ESRCH);
	}

	xfs_trans_ijoin_ref(tp, quotip, XFS_ILOCK_EXCL);
	nmaps = 1;
	if ((error = xfs_bmapi(tp, quotip,
			      offset_fsb, XFS_DQUOT_CLUSTER_SIZE_FSB,
			      XFS_BMAPI_METADATA | XFS_BMAPI_WRITE,
			      &firstblock,
			      XFS_QM_DQALLOC_SPACE_RES(mp),
			      &map, &nmaps, &flist))) {
		goto error0;
	}
	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(nmaps == 1);
	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
	       (map.br_startblock != HOLESTARTBLOCK));

	/*
	 * Keep track of the blkno to save a lookup later
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	/* now we can just get the buffer (there's nothing to read yet) */
	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			       dqp->q_blkno,
			       mp->m_quotainfo->qi_dqchunklen,
			       0);
	if (!bp || (error = XFS_BUF_GETERROR(bp)))
		goto error1;
	/*
	 * Make a chunk of dquots out of this buffer and log
	 * the entire thing.
	 */
	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);

	/*
	 * xfs_bmap_finish() may commit the current transaction and
	 * start a second transaction if the freelist is not empty.
	 *
	 * Since we still want to modify this buffer, we need to
	 * ensure that the buffer is not released on commit of
	 * the first transaction and ensure the buffer is added to the
	 * second transaction.
	 *
	 * If there is only one transaction then don't stop the buffer
	 * from being released when it commits later on.
	 */
	xfs_trans_bhold(tp, bp);

	if ((error = xfs_bmap_finish(tpp, &flist, &committed))) {
		goto error1;
	}

	if (committed) {
		tp = *tpp;
		xfs_trans_bjoin(tp, bp);
	} else {
		xfs_trans_bhold_release(tp, bp);
	}

	*O_bpp = bp;
	return 0;

error1:
	xfs_bmap_cancel(&flist);
error0:
	xfs_iunlock(quotip, XFS_ILOCK_EXCL);

	return (error);
}

/*
 * Maps a dquot to the buffer containing its on-disk version.
 * This returns a ptr to the buffer containing the on-disk dquot
 * in the bpp param, and a ptr to the on-disk dquot within that buffer.
 */
STATIC int
xfs_qm_dqtobp(
	xfs_trans_t		**tpp,
	xfs_dquot_t		*dqp,
	xfs_disk_dquot_t	**O_ddpp,
	xfs_buf_t		**O_bpp,
	uint			flags)
{
	xfs_bmbt_irec_t map;
	int		nmaps, error;
	xfs_buf_t	*bp;
	xfs_inode_t	*quotip;
	xfs_mount_t	*mp;
	xfs_disk_dquot_t *ddq;
	xfs_dqid_t	id;
	boolean_t	newdquot;
	xfs_trans_t	*tp = (tpp ? *tpp : NULL);

	mp = dqp->q_mount;
	id = be32_to_cpu(dqp->q_core.d_id);
	nmaps = 1;
	newdquot = B_FALSE;

	/*
	 * If we don't know where the dquot lives, find out.
	 */
	if (dqp->q_blkno == (xfs_daddr_t) 0) {
		/* We use the id as an index */
		dqp->q_fileoffset = (xfs_fileoff_t)id /
					mp->m_quotainfo->qi_dqperchunk;
		nmaps = 1;
		quotip = XFS_DQ_TO_QIP(dqp);
		xfs_ilock(quotip, XFS_ILOCK_SHARED);
		/*
		 * Return if this type of quota was turned off while we didn't
		 * have an inode lock.
		 */
		if (XFS_IS_THIS_QUOTA_OFF(dqp)) {
			xfs_iunlock(quotip, XFS_ILOCK_SHARED);
			return (ESRCH);
		}
		/*
		 * Find the block map; no allocations yet
		 */
		error = xfs_bmapi(NULL, quotip, dqp->q_fileoffset,
				  XFS_DQUOT_CLUSTER_SIZE_FSB,
				  XFS_BMAPI_METADATA,
				  NULL, 0, &map, &nmaps, NULL);

		xfs_iunlock(quotip, XFS_ILOCK_SHARED);
		if (error)
			return (error);
		ASSERT(nmaps == 1);
		ASSERT(map.br_blockcount == 1);

		/*
		 * offset of dquot in the (fixed sized) dquot chunk.
		 */
		dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
				   sizeof(xfs_dqblk_t);
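		/*
		 * Offsets computed above, as an illustration: with, say,
		 * 30 dquots per chunk, id 73 gives q_fileoffset 73 / 30 = 2
		 * and q_bufoffset (73 % 30) * sizeof(xfs_dqblk_t), i.e. the
		 * 13th dquot within that chunk.
		 */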
		if (map.br_startblock == HOLESTARTBLOCK) {
			/*
			 * We don't allocate unless we're asked to
			 */
			if (!(flags & XFS_QMOPT_DQALLOC))
				return (ENOENT);

			ASSERT(tp);
			if ((error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
						dqp->q_fileoffset, &bp)))
				return (error);
			tp = *tpp;
			newdquot = B_TRUE;
		} else {
			/*
			 * store the blkno etc so that we don't have to do the
			 * mapping all the time
			 */
			dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
		}
	}
	ASSERT(dqp->q_blkno != DELAYSTARTBLOCK);
	ASSERT(dqp->q_blkno != HOLESTARTBLOCK);

	/*
	 * Read in the buffer, unless we've just done the allocation
	 * (in which case we already have the buf).
	 */
	if (!newdquot) {
		trace_xfs_dqtobp_read(dqp);

		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
					   dqp->q_blkno,
					   mp->m_quotainfo->qi_dqchunklen,
					   0, &bp);
		if (error || !bp)
			return XFS_ERROR(error);
	}
	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);

	/*
	 * calculate the location of the dquot inside the buffer.
	 */
	ddq = (xfs_disk_dquot_t *)((char *)XFS_BUF_PTR(bp) + dqp->q_bufoffset);

	/*
	 * A simple sanity check in case we got a corrupted dquot...
	 */
	if (xfs_qm_dqcheck(ddq, id, dqp->dq_flags & XFS_DQ_ALLTYPES,
			   flags & (XFS_QMOPT_DQREPAIR|XFS_QMOPT_DOWARN),
			   "dqtobp")) {
		if (!(flags & XFS_QMOPT_DQREPAIR)) {
			xfs_trans_brelse(tp, bp);
			return XFS_ERROR(EIO);
		}
		XFS_BUF_BUSY(bp); /* We dirtied this */
	}

	*O_bpp = bp;
	*O_ddpp = ddq;

	return (0);
}

/*
 * Read in the ondisk dquot using dqtobp() then copy it to an incore version,
 * and release the buffer immediately.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqread(
	xfs_trans_t	**tpp,
	xfs_dqid_t	id,
	xfs_dquot_t	*dqp,	/* dquot to get filled in */
	uint		flags)
{
	xfs_disk_dquot_t *ddqp;
	xfs_buf_t	 *bp;
	int		 error;
	xfs_trans_t	 *tp;

	ASSERT(tpp);

	trace_xfs_dqread(dqp);

	/*
	 * get a pointer to the on-disk dquot and the buffer containing it
	 * dqp already knows its own type (GROUP/USER).
	 */
	if ((error = xfs_qm_dqtobp(tpp, dqp, &ddqp, &bp, flags))) {
		return (error);
	}
	tp = *tpp;

	/* copy everything from disk dquot to the incore dquot */
	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
	ASSERT(be32_to_cpu(dqp->q_core.d_id) == id);
	xfs_qm_dquot_logitem_init(dqp);

	/*
	 * Reservation counters are defined as reservation plus current usage
	 * to avoid having to add every time.
	 */
	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);

	/* Mark the buf so that this will stay incore a little longer */
	XFS_BUF_SET_VTYPE_REF(bp, B_FS_DQUOT, XFS_DQUOT_REF);

	/*
	 * We got the buffer with xfs_trans_read_buf() (in dqtobp()), so
	 * we need to release it with xfs_trans_brelse().
	 * The strategy here is identical to that of inodes; we lock
	 * the dquot in xfs_qm_dqget() before making it accessible to
	 * others. This is because dquots, like inodes, need a good level of
	 * concurrency, and we don't want to take locks on the entire buffers
	 * for dquot accesses.
	 * Note also that the dquot buffer may even be dirty at this point, if
	 * this particular dquot was repaired. We still aren't afraid to
	 * brelse it because we have the changes incore.
	 */
	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
	xfs_trans_brelse(tp, bp);

	return (error);
}

/*
 * allocate an incore dquot from the kernel heap,
 * and fill its core with quota information kept on disk.
 * If XFS_QMOPT_DQALLOC is set, it'll allocate a dquot on disk
 * if it wasn't already allocated.
 */
STATIC int
xfs_qm_idtodq(
	xfs_mount_t	*mp,
	xfs_dqid_t	id,	 /* gid or uid, depending on type */
	uint		type,	 /* UDQUOT or GDQUOT */
	uint		flags,	 /* DQALLOC, DQREPAIR */
	xfs_dquot_t	**O_dqpp)/* OUT : incore dquot, not locked */
{
	xfs_dquot_t	*dqp;
	int		error;
	xfs_trans_t	*tp;
	int		cancelflags = 0;

	dqp = xfs_qm_dqinit(mp, id, type);
	tp = NULL;
	if (flags & XFS_QMOPT_DQALLOC) {
		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
		error = xfs_trans_reserve(tp, XFS_QM_DQALLOC_SPACE_RES(mp),
				XFS_WRITE_LOG_RES(mp) +
				BBTOB(mp->m_quotainfo->qi_dqchunklen) - 1 +
				128,
				0,
				XFS_TRANS_PERM_LOG_RES,
				XFS_WRITE_LOG_COUNT);
		if (error) {
			cancelflags = 0;
			goto error0;
		}
		cancelflags = XFS_TRANS_RELEASE_LOG_RES;
	}

	/*
	 * Read it from disk; xfs_qm_dqread() takes care of
	 * all the necessary initialization of the dquot's fields (locks, etc).
	 */
	if ((error = xfs_qm_dqread(&tp, id, dqp, flags))) {
		/*
		 * This can happen if quotas got turned off (ESRCH),
		 * or if the dquot didn't exist on disk and we ask to
		 * allocate (ENOENT).
		 */
		trace_xfs_dqread_fail(dqp);
		cancelflags |= XFS_TRANS_ABORT;
		goto error0;
	}
	if (tp) {
		if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES)))
			goto error1;
	}

	*O_dqpp = dqp;
	return (0);

error0:
	ASSERT(error);
	if (tp)
		xfs_trans_cancel(tp, cancelflags);
error1:
	xfs_qm_dqdestroy(dqp);
	*O_dqpp = NULL;
	return (error);
}

/*
 * Lookup a dquot in the incore dquot hashtable. We keep two separate
 * hashtables for user and group dquots; these are global tables
 * inside the XQM, not per-filesystem tables.
 * The hash chain must be locked by the caller, and it is left locked
 * on return. The returned dquot is locked.
 */
STATIC int
xfs_qm_dqlookup(
	xfs_mount_t		*mp,
	xfs_dqid_t		id,
	xfs_dqhash_t		*qh,
	xfs_dquot_t		**O_dqpp)
{
	xfs_dquot_t		*dqp;
	uint			flist_locked;

	ASSERT(mutex_is_locked(&qh->qh_lock));

	flist_locked = B_FALSE;

	/*
	 * Traverse the hashchain looking for a match
	 */
	list_for_each_entry(dqp, &qh->qh_list, q_hashlist) {
		/*
		 * We already have the hashlock. We don't need the
		 * dqlock to look at the id field of the dquot, since the
		 * id can't be modified without the hashlock anyway.
		 */
		if (be32_to_cpu(dqp->q_core.d_id) == id && dqp->q_mount == mp) {
			trace_xfs_dqlookup_found(dqp);

			/*
			 * All in core dquots must be on the dqlist of mp
			 */
			ASSERT(!list_empty(&dqp->q_mplist));

			xfs_dqlock(dqp);
			if (dqp->q_nrefs == 0) {
				ASSERT(!list_empty(&dqp->q_freelist));
				if (!mutex_trylock(&xfs_Gqm->qm_dqfrlist_lock)) {
					trace_xfs_dqlookup_want(dqp);

					/*
					 * We may have raced with dqreclaim_one()
					 * (and lost). So, flag that we don't
					 * want the dquot to be reclaimed.
					 */
					dqp->dq_flags |= XFS_DQ_WANT;
					xfs_dqunlock(dqp);
					mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
					xfs_dqlock(dqp);
					dqp->dq_flags &= ~(XFS_DQ_WANT);
				}
				flist_locked = B_TRUE;
			}

			/*
			 * id couldn't have changed; we had the hashlock all
			 * along
			 */
			ASSERT(be32_to_cpu(dqp->q_core.d_id) == id);

			if (flist_locked) {
				if (dqp->q_nrefs != 0) {
					mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
					flist_locked = B_FALSE;
				} else {
					/* take it off the freelist */
					trace_xfs_dqlookup_freelist(dqp);
					list_del_init(&dqp->q_freelist);
					xfs_Gqm->qm_dqfrlist_cnt--;
				}
			}

			XFS_DQHOLD(dqp);

			if (flist_locked)
				mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
			/*
			 * move the dquot to the front of the hashchain
			 */
			ASSERT(mutex_is_locked(&qh->qh_lock));
			list_move(&dqp->q_hashlist, &qh->qh_list);
			trace_xfs_dqlookup_done(dqp);
			*O_dqpp = dqp;
			return 0;
		}
	}

	*O_dqpp = NULL;
	ASSERT(mutex_is_locked(&qh->qh_lock));
	return (1);
}

/*
 * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
 * locked dquot, doing an allocation (if requested) as needed.
 * When both an inode and an id are given, the inode's id takes precedence.
 * That is, if the id changes while we don't hold the ilock inside this
 * function, the new dquot is returned, not necessarily the one requested
 * in the id argument.
 */
int
xfs_qm_dqget(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,	  /* locked inode (optional) */
	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
{
	xfs_dquot_t	*dqp;
	xfs_dqhash_t	*h;
	uint		version;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
	    (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
		return (ESRCH);
	}
	h = XFS_DQ_HASH(mp, id, type);

#ifdef DEBUG
	if (xfs_do_dqerror) {
		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
			cmn_err(CE_DEBUG, "Returning error in dqget");
			return (EIO);
		}
	}
#endif

 again:

#ifdef DEBUG
	ASSERT(type == XFS_DQ_USER ||
	       type == XFS_DQ_PROJ ||
	       type == XFS_DQ_GROUP);
	if (ip) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		if (type == XFS_DQ_USER)
			ASSERT(ip->i_udquot == NULL);
		else
			ASSERT(ip->i_gdquot == NULL);
	}
#endif
	mutex_lock(&h->qh_lock);

	/*
	 * Look in the cache (hashtable).
	 * The chain is kept locked during lookup.
	 */
	if (xfs_qm_dqlookup(mp, id, h, O_dqpp) == 0) {
		XQM_STATS_INC(xqmstats.xs_qm_dqcachehits);
		/*
		 * The dquot was found, moved to the front of the chain,
		 * taken off the freelist if it was on it, and locked
		 * at this point. Just unlock the hashchain and return.
		 */
		ASSERT(*O_dqpp);
		ASSERT(XFS_DQ_IS_LOCKED(*O_dqpp));
		mutex_unlock(&h->qh_lock);
		trace_xfs_dqget_hit(*O_dqpp);
		return (0);	/* success */
	}
	XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses);

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	if (ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * Save the hashchain version stamp, and unlock the chain, so that
	 * we don't keep the lock across a disk read
	 */
	version = h->qh_version;
	mutex_unlock(&h->qh_lock);
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 905 |  | 
|  | 906 | /* | 
|  | 907 | * Allocate the dquot on the kernel heap, and read the ondisk | 
|  | 908 | * portion off the disk. Also, do all the necessary initialization | 
|  | 909 | * This can return ENOENT if dquot didn't exist on disk and we didn't | 
|  | 910 | * ask it to allocate; ESRCH if quotas got turned off suddenly. | 
|  | 911 | */ | 
|  | 912 | if ((error = xfs_qm_idtodq(mp, id, type, | 
|  | 913 | flags & (XFS_QMOPT_DQALLOC|XFS_QMOPT_DQREPAIR| | 
|  | 914 | XFS_QMOPT_DOWARN), | 
|  | 915 | &dqp))) { | 
|  | 916 | if (ip) | 
|  | 917 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 
|  | 918 | return (error); | 
|  | 919 | } | 

	/*
	 * See if this is mount code calling to look at the overall quota limits
	 * which are stored in the id == 0 user or group's dquot.
	 * Since we may not have done a quotacheck by this point, just return
	 * the dquot without attaching it to any hashtables, lists, etc, or even
	 * taking a reference.
	 * The caller must dqdestroy this once done.
	 */
	if (flags & XFS_QMOPT_DQSUSER) {
		ASSERT(id == 0);
		ASSERT(! ip);
		goto dqret;
	}

	/*
	 * Dquot lock comes after hashlock in the lock ordering
	 */
	if (ip) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);

		/*
		 * A dquot could be attached to this inode by now, since
		 * we had dropped the ilock.
		 */
		if (type == XFS_DQ_USER) {
			if (!XFS_IS_UQUOTA_ON(mp)) {
				/* inode stays locked on return */
				xfs_qm_dqdestroy(dqp);
				return XFS_ERROR(ESRCH);
			}
			if (ip->i_udquot) {
				xfs_qm_dqdestroy(dqp);
				dqp = ip->i_udquot;
				xfs_dqlock(dqp);
				goto dqret;
			}
		} else {
			if (!XFS_IS_OQUOTA_ON(mp)) {
				/* inode stays locked on return */
				xfs_qm_dqdestroy(dqp);
				return XFS_ERROR(ESRCH);
			}
			if (ip->i_gdquot) {
				xfs_qm_dqdestroy(dqp);
				dqp = ip->i_gdquot;
				xfs_dqlock(dqp);
				goto dqret;
			}
		}
	}

	/*
	 * Hashlock comes after ilock in lock order
	 */
	mutex_lock(&h->qh_lock);
	if (version != h->qh_version) {
		xfs_dquot_t *tmpdqp;
		/*
		 * Now, see if somebody else put the dquot in the
		 * hashtable before us. This can happen because we didn't
		 * keep the hashchain lock. We don't have to worry about
		 * lock order between the two dquots here since dqp isn't
		 * on any findable lists yet.
		 */
		if (xfs_qm_dqlookup(mp, id, h, &tmpdqp) == 0) {
			/*
			 * Duplicate found. Just throw away the new dquot
			 * and start over.
			 */
			xfs_qm_dqput(tmpdqp);
			mutex_unlock(&h->qh_lock);
			xfs_qm_dqdestroy(dqp);
			XQM_STATS_INC(xqmstats.xs_qm_dquot_dups);
			goto again;
		}
	}

	/*
	 * Put the dquot at the beginning of the hash-chain and mp's list.
	 * LOCK ORDER: hashlock, freelistlock, mplistlock, udqlock, gdqlock ..
	 */
	ASSERT(mutex_is_locked(&h->qh_lock));
	dqp->q_hash = h;
	list_add(&dqp->q_hashlist, &h->qh_list);
	h->qh_version++;

	/*
	 * Attach this dquot to this filesystem's list of all dquots,
	 * kept inside the mount structure in m_quotainfo field
	 */
	mutex_lock(&mp->m_quotainfo->qi_dqlist_lock);

	/*
	 * We return a locked dquot to the caller, with a reference taken
	 */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	list_add(&dqp->q_mplist, &mp->m_quotainfo->qi_dqlist);
	mp->m_quotainfo->qi_dquots++;
	mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
	mutex_unlock(&h->qh_lock);
 dqret:
	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return (0);
}

/*
 * Release a reference to the dquot (decrement ref-count)
 * and unlock it. If there is a group quota attached to this
 * dquot, carefully release that too without tripping over
 * deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
	xfs_dquot_t	*dqp)
{
	xfs_dquot_t	*gdqp;

	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

	if (dqp->q_nrefs != 1) {
		dqp->q_nrefs--;
		xfs_dqunlock(dqp);
		return;
	}

	/*
	 * drop the dqlock and acquire the freelist and dqlock
	 * in the right order; but try to get it out-of-order first
	 */
	if (!mutex_trylock(&xfs_Gqm->qm_dqfrlist_lock)) {
		trace_xfs_dqput_wait(dqp);
		xfs_dqunlock(dqp);
		mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
		xfs_dqlock(dqp);
	}

	while (1) {
		gdqp = NULL;

		/* We can't depend on nrefs being == 1 here */
		if (--dqp->q_nrefs == 0) {
			trace_xfs_dqput_free(dqp);

			list_add_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist);
			xfs_Gqm->qm_dqfrlist_cnt++;

			/*
			 * If we just added a udquot to the freelist, then
			 * we want to release the gdquot reference that
			 * it (probably) has. Otherwise it'll keep the
			 * gdquot from getting reclaimed.
			 */
			if ((gdqp = dqp->q_gdquot)) {
				/*
				 * Avoid a recursive dqput call
				 */
				xfs_dqlock(gdqp);
				dqp->q_gdquot = NULL;
			}
		}
		xfs_dqunlock(dqp);

		/*
		 * If we had a group quota inside the user quota as a hint,
		 * release it now.
		 */
		if (! gdqp)
			break;
		dqp = gdqp;
	}
	mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
}
|  | 1101 |  | 
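/*
 * Illustrative sketch only -- not part of the original source.  It shows
 * the reference discipline implied by xfs_qm_dqget()/xfs_qm_dqput()
 * above, assuming the xfs_qm_dqget(mp, ip, id, type, flags, &dqp)
 * calling convention used elsewhere in this file.
 */
#if 0	/* example only */
STATIC int
example_use_dquot(
	xfs_mount_t	*mp,
	xfs_dqid_t	id)
{
	xfs_dquot_t	*dqp;
	int		error;

	/* no inode hint; dqget returns a locked dquot with one reference */
	error = xfs_qm_dqget(mp, NULL, id, XFS_DQ_USER, 0, &dqp);
	if (error)
		return error;

	xfs_dqunlock(dqp);

	/* ... use the dquot while the reference pins it ... */

	/* drop the reference; dqrele takes the dquot lock and dqput()s */
	xfs_qm_dqrele(dqp);
	return 0;
}
#endif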
|  | 1102 | /* | 
|  | 1103 | * Release a dquot: take the dquot lock and dqput() it.  Dirty dquots | 
|  | 1104 | * are not flushed here.  The caller must not hold the dquot lock. | 
|  | 1105 | */ | 
|  | 1106 | void | 
|  | 1107 | xfs_qm_dqrele( | 
|  | 1108 | xfs_dquot_t	*dqp) | 
|  | 1109 | { | 
| Christoph Hellwig | 7d09525 | 2009-06-08 15:33:32 +0200 | [diff] [blame] | 1110 | if (!dqp) | 
|  | 1111 | return; | 
|  | 1112 |  | 
| Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 1113 | trace_xfs_dqrele(dqp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1114 |  | 
|  | 1115 | xfs_dqlock(dqp); | 
|  | 1116 | /* | 
|  | 1117 | * We don't flush the dquot here even if it is dirty; that | 
|  | 1118 | * would create stutters that we want to avoid.  Instead, a | 
|  | 1119 | * dirty dquot is written out as a delayed write when we try | 
|  | 1120 | * to reclaim it, and xfs_sync takes part of the burden as well. | 
|  | 1121 | */ | 
|  | 1122 | xfs_qm_dqput(dqp); | 
|  | 1123 | } | 
|  | 1124 |  | 
| Christoph Hellwig | ca30b2a | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 1125 | /* | 
|  | 1126 | * This is the dquot flushing I/O completion routine.  It is called | 
|  | 1127 | * from interrupt level when the buffer containing the dquot is | 
|  | 1128 | * flushed to disk.  It is responsible for removing the dquot logitem | 
|  | 1129 | * from the AIL if it has not been re-logged, and unlocking the dquot's | 
|  | 1130 | * flush lock.  This behavior is very similar to that of inodes. | 
|  | 1131 | */ | 
|  | 1132 | STATIC void | 
|  | 1133 | xfs_qm_dqflush_done( | 
|  | 1134 | struct xfs_buf		*bp, | 
|  | 1135 | struct xfs_log_item	*lip) | 
|  | 1136 | { | 
|  | 1137 | xfs_dq_logitem_t	*qip = (struct xfs_dq_logitem *)lip; | 
|  | 1138 | xfs_dquot_t		*dqp = qip->qli_dquot; | 
|  | 1139 | struct xfs_ail		*ailp = lip->li_ailp; | 
|  | 1140 |  | 
|  | 1141 | /* | 
|  | 1142 | * We only want to pull the item from the AIL if its location | 
|  | 1143 | * in the log has not changed since we started the flush, i.e. | 
|  | 1144 | * if the dquot's lsn still matches the flush lsn we recorded. | 
|  | 1145 | * We check the lsn outside the AIL lock first, since that is | 
|  | 1146 | * cheaper, and then recheck it while holding the lock before | 
|  | 1147 | * removing the dquot from the AIL. | 
|  | 1148 | */ | 
|  | 1149 | if ((lip->li_flags & XFS_LI_IN_AIL) && | 
|  | 1150 | lip->li_lsn == qip->qli_flush_lsn) { | 
|  | 1151 |  | 
|  | 1152 | /* xfs_trans_ail_delete() drops the AIL lock. */ | 
|  | 1153 | spin_lock(&ailp->xa_lock); | 
|  | 1154 | if (lip->li_lsn == qip->qli_flush_lsn) | 
|  | 1155 | xfs_trans_ail_delete(ailp, lip); | 
|  | 1156 | else | 
|  | 1157 | spin_unlock(&ailp->xa_lock); | 
|  | 1158 | } | 
|  | 1159 |  | 
|  | 1160 | /* | 
|  | 1161 | * Release the dq's flush lock since we're done with it. | 
|  | 1162 | */ | 
|  | 1163 | xfs_dqfunlock(dqp); | 
|  | 1164 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1165 |  | 
|  | 1166 | /* | 
|  | 1167 | * Write a modified dquot to disk. | 
|  | 1168 | * The dquot must be locked and the flush lock too taken by caller. | 
|  | 1169 | * The flush lock will not be unlocked until the dquot reaches the disk, | 
|  | 1170 | * but the dquot is free to be unlocked and modified by the caller | 
|  | 1171 | * in the interim. Dquot is still locked on return. This behavior is | 
|  | 1172 | * identical to that of inodes. | 
|  | 1173 | */ | 
|  | 1174 | int | 
|  | 1175 | xfs_qm_dqflush( | 
|  | 1176 | xfs_dquot_t		*dqp, | 
|  | 1177 | uint			flags) | 
|  | 1178 | { | 
|  | 1179 | xfs_mount_t		*mp; | 
|  | 1180 | xfs_buf_t		*bp; | 
|  | 1181 | xfs_disk_dquot_t	*ddqp; | 
|  | 1182 | int			error; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1183 |  | 
|  | 1184 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); | 
| David Chinner | e1f49cf | 2008-08-13 16:41:43 +1000 | [diff] [blame] | 1185 | ASSERT(!completion_done(&dqp->q_flush)); | 
| Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 1186 | trace_xfs_dqflush(dqp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1187 |  | 
|  | 1188 | /* | 
| David Chinner | 2f8a3ce | 2008-10-30 17:07:20 +1100 | [diff] [blame] | 1189 | * If the dquot is not dirty, or it is pinned and we are not | 
|  | 1190 | * supposed to block, there is nothing to do. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1191 | */ | 
| David Chinner | 2f8a3ce | 2008-10-30 17:07:20 +1100 | [diff] [blame] | 1192 | if (!XFS_DQ_IS_DIRTY(dqp) || | 
| Dave Chinner | 20026d9 | 2010-02-04 09:48:58 +1100 | [diff] [blame] | 1193 | (!(flags & SYNC_WAIT) && atomic_read(&dqp->q_pincount) > 0)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1194 | xfs_dqfunlock(dqp); | 
| David Chinner | 2f8a3ce | 2008-10-30 17:07:20 +1100 | [diff] [blame] | 1195 | return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1196 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1197 | xfs_qm_dqunpin_wait(dqp); | 
|  | 1198 |  | 
|  | 1199 | /* | 
|  | 1200 | * This may have been unpinned because the filesystem is shutting | 
|  | 1201 | * down forcibly. If that's the case we must not write this dquot | 
|  | 1202 | * to disk, because the log record didn't make it to disk! | 
|  | 1203 | */ | 
|  | 1204 | if (XFS_FORCED_SHUTDOWN(dqp->q_mount)) { | 
|  | 1205 | dqp->dq_flags &= ~(XFS_DQ_DIRTY); | 
|  | 1206 | xfs_dqfunlock(dqp); | 
|  | 1207 | return XFS_ERROR(EIO); | 
|  | 1208 | } | 
|  | 1209 |  | 
|  | 1210 | /* | 
|  | 1211 | * Get the buffer containing the on-disk dquot.  We don't need | 
|  | 1212 | * a transaction envelope because we know that the on-disk dquot | 
|  | 1213 | * has already been allocated. | 
|  | 1214 | */ | 
|  | 1215 | if ((error = xfs_qm_dqtobp(NULL, dqp, &ddqp, &bp, XFS_QMOPT_DOWARN))) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1216 | ASSERT(error != ENOENT); | 
|  | 1217 | /* | 
|  | 1218 | * Quotas could have gotten turned off (ESRCH) | 
|  | 1219 | */ | 
|  | 1220 | xfs_dqfunlock(dqp); | 
|  | 1221 | return (error); | 
|  | 1222 | } | 
|  | 1223 |  | 
| Christoph Hellwig | 1149d96 | 2005-11-02 15:01:12 +1100 | [diff] [blame] | 1224 | if (xfs_qm_dqcheck(&dqp->q_core, be32_to_cpu(ddqp->d_id), | 
| Nathan Scott | c8ad20f | 2005-06-21 15:38:48 +1000 | [diff] [blame] | 1225 | 0, XFS_QMOPT_DOWARN, "dqflush (incore copy)")) { | 
| Nathan Scott | 7d04a33 | 2006-06-09 14:58:38 +1000 | [diff] [blame] | 1226 | xfs_force_shutdown(dqp->q_mount, SHUTDOWN_CORRUPT_INCORE); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1227 | return XFS_ERROR(EIO); | 
|  | 1228 | } | 
|  | 1229 |  | 
|  | 1230 | /* This is the only portion of data that needs to persist */ | 
|  | 1231 | memcpy(ddqp, &(dqp->q_core), sizeof(xfs_disk_dquot_t)); | 
|  | 1232 |  | 
|  | 1233 | /* | 
|  | 1234 | * Clear the dirty field and remember the flush lsn for later use. | 
|  | 1235 | */ | 
|  | 1236 | dqp->dq_flags &= ~(XFS_DQ_DIRTY); | 
|  | 1237 | mp = dqp->q_mount; | 
|  | 1238 |  | 
| David Chinner | 7b2e2a3 | 2008-10-30 17:39:12 +1100 | [diff] [blame] | 1239 | xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn, | 
|  | 1240 | &dqp->q_logitem.qli_item.li_lsn); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1241 |  | 
|  | 1242 | /* | 
|  | 1243 | * Attach an iodone routine so that we can remove this dquot from the | 
|  | 1244 | * AIL and release the flush lock once the dquot is synced to disk. | 
|  | 1245 | */ | 
| Christoph Hellwig | ca30b2a | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 1246 | xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done, | 
|  | 1247 | &dqp->q_logitem.qli_item); | 
|  | 1248 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1249 | /* | 
|  | 1250 | * If the buffer is pinned then push on the log so we won't | 
|  | 1251 | * get stuck waiting in the write for too long. | 
|  | 1252 | */ | 
|  | 1253 | if (XFS_BUF_ISPINNED(bp)) { | 
| Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 1254 | trace_xfs_dqflush_force(dqp); | 
| Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 1255 | xfs_log_force(mp, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1256 | } | 
|  | 1257 |  | 
| Dave Chinner | 20026d9 | 2010-02-04 09:48:58 +1100 | [diff] [blame] | 1258 | if (flags & SYNC_WAIT) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1259 | error = xfs_bwrite(mp, bp); | 
| Dave Chinner | 20026d9 | 2010-02-04 09:48:58 +1100 | [diff] [blame] | 1260 | else | 
|  | 1261 | xfs_bdwrite(mp, bp); | 
| Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 1262 |  | 
|  | 1263 | trace_xfs_dqflush_done(dqp); | 
|  | 1264 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1265 | /* | 
|  | 1266 | * dqp is still locked, but caller is free to unlock it now. | 
|  | 1267 | */ | 
| Dave Chinner | 20026d9 | 2010-02-04 09:48:58 +1100 | [diff] [blame] | 1268 | return error; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1269 |  | 
|  | 1270 | } | 
|  | 1271 |  | 
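/*
 * Illustrative sketch only -- not part of the original source.  It shows
 * the calling convention described above: both the dquot lock and the
 * flush lock are held on entry, xfs_qm_dqflush() hands the flush lock to
 * the buffer I/O completion, and the dquot lock stays with the caller.
 */
#if 0	/* example only */
STATIC int
example_flush_dquot(
	xfs_dquot_t	*dqp)
{
	int	error;

	xfs_dqlock(dqp);
	if (!xfs_dqflock_nowait(dqp)) {
		/* nudge any delwri buffer and wait for the flush lock */
		xfs_qm_dqflock_pushbuf_wait(dqp);
	}
	error = xfs_qm_dqflush(dqp, SYNC_WAIT);
	xfs_dqunlock(dqp);
	return error;
}
#endif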
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1272 | int | 
|  | 1273 | xfs_qm_dqlock_nowait( | 
|  | 1274 | xfs_dquot_t *dqp) | 
|  | 1275 | { | 
| David Chinner | e1f49cf | 2008-08-13 16:41:43 +1000 | [diff] [blame] | 1276 | return mutex_trylock(&dqp->q_qlock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1277 | } | 
|  | 1278 |  | 
|  | 1279 | void | 
|  | 1280 | xfs_dqlock( | 
|  | 1281 | xfs_dquot_t *dqp) | 
|  | 1282 | { | 
| David Chinner | e1f49cf | 2008-08-13 16:41:43 +1000 | [diff] [blame] | 1283 | mutex_lock(&dqp->q_qlock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1284 | } | 
|  | 1285 |  | 
|  | 1286 | void | 
|  | 1287 | xfs_dqunlock( | 
|  | 1288 | xfs_dquot_t *dqp) | 
|  | 1289 | { | 
|  | 1290 | mutex_unlock(&(dqp->q_qlock)); | 
|  | 1291 | if (dqp->q_logitem.qli_dquot == dqp) { | 
|  | 1292 | /* Once was dqp->q_mount, but might just have been cleared */ | 
| David Chinner | 783a2f6 | 2008-10-30 17:39:58 +1100 | [diff] [blame] | 1293 | xfs_trans_unlocked_item(dqp->q_logitem.qli_item.li_ailp, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1294 | (xfs_log_item_t*)&(dqp->q_logitem)); | 
|  | 1295 | } | 
|  | 1296 | } | 
|  | 1297 |  | 
|  | 1298 |  | 
|  | 1299 | void | 
|  | 1300 | xfs_dqunlock_nonotify( | 
|  | 1301 | xfs_dquot_t *dqp) | 
|  | 1302 | { | 
|  | 1303 | mutex_unlock(&(dqp->q_qlock)); | 
|  | 1304 | } | 
|  | 1305 |  | 
| Christoph Hellwig | 5bb87a3 | 2009-01-19 02:03:19 +0100 | [diff] [blame] | 1306 | /* | 
|  | 1307 | * Lock two xfs_dquot structures. | 
|  | 1308 | * | 
|  | 1309 | * To avoid deadlocks we always lock the quota structure with | 
|  | 1310 | * the lower id first. | 
|  | 1311 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1312 | void | 
|  | 1313 | xfs_dqlock2( | 
|  | 1314 | xfs_dquot_t	*d1, | 
|  | 1315 | xfs_dquot_t	*d2) | 
|  | 1316 | { | 
|  | 1317 | if (d1 && d2) { | 
|  | 1318 | ASSERT(d1 != d2); | 
| Christoph Hellwig | 1149d96 | 2005-11-02 15:01:12 +1100 | [diff] [blame] | 1319 | if (be32_to_cpu(d1->q_core.d_id) > | 
|  | 1320 | be32_to_cpu(d2->q_core.d_id)) { | 
| Christoph Hellwig | 5bb87a3 | 2009-01-19 02:03:19 +0100 | [diff] [blame] | 1321 | mutex_lock(&d2->q_qlock); | 
|  | 1322 | mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1323 | } else { | 
| Christoph Hellwig | 5bb87a3 | 2009-01-19 02:03:19 +0100 | [diff] [blame] | 1324 | mutex_lock(&d1->q_qlock); | 
|  | 1325 | mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1326 | } | 
| Christoph Hellwig | 5bb87a3 | 2009-01-19 02:03:19 +0100 | [diff] [blame] | 1327 | } else if (d1) { | 
|  | 1328 | mutex_lock(&d1->q_qlock); | 
|  | 1329 | } else if (d2) { | 
|  | 1330 | mutex_lock(&d2->q_qlock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1331 | } | 
|  | 1332 | } | 
|  | 1333 |  | 
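/*
 * Illustrative sketch only -- not part of the original source.  It shows
 * locking an inode's user and group dquots together: xfs_dqlock2()
 * orders the two locks by quota id, so the caller need not care which
 * dquot has the lower id.  The i_udquot/i_gdquot field names are assumed
 * here, and both dquots are assumed to be attached.
 */
#if 0	/* example only */
STATIC void
example_lock_both_dquots(
	struct xfs_inode	*ip)
{
	xfs_dqlock2(ip->i_udquot, ip->i_gdquot);

	/* ... adjust both dquots while holding both locks ... */

	xfs_dqunlock(ip->i_gdquot);
	xfs_dqunlock(ip->i_udquot);
}
#endif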
|  | 1334 |  | 
|  | 1335 | /* | 
|  | 1336 | * Take a dquot out of the mount's dqlist as well as the hashlist. | 
|  | 1337 | * This is called via unmount as well as quotaoff, and the purge | 
|  | 1338 | * will always succeed unless there are soft (temp) references | 
|  | 1339 | * outstanding. | 
|  | 1340 | * | 
|  | 1341 | * This returns 0 if it was purged, 1 if it wasn't. It's not an error code | 
|  | 1342 | * that we're returning! XXXsup - not cool. | 
|  | 1343 | */ | 
|  | 1344 | /* ARGSUSED */ | 
|  | 1345 | int | 
|  | 1346 | xfs_qm_dqpurge( | 
| Denys Vlasenko | 4f0e8a9 | 2008-05-19 16:34:04 +1000 | [diff] [blame] | 1347 | xfs_dquot_t	*dqp) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1348 | { | 
| Dave Chinner | e6a81f1 | 2010-04-13 15:06:51 +1000 | [diff] [blame] | 1349 | xfs_dqhash_t	*qh = dqp->q_hash; | 
| David Chinner | 3c56836 | 2008-04-10 12:20:24 +1000 | [diff] [blame] | 1350 | xfs_mount_t	*mp = dqp->q_mount; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1351 |  | 
| Dave Chinner | 3a25404 | 2010-04-13 15:06:48 +1000 | [diff] [blame] | 1352 | ASSERT(mutex_is_locked(&mp->m_quotainfo->qi_dqlist_lock)); | 
| Christoph Hellwig | c9a192d | 2009-02-09 08:47:22 +0100 | [diff] [blame] | 1353 | ASSERT(mutex_is_locked(&dqp->q_hash->qh_lock)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1354 |  | 
|  | 1355 | xfs_dqlock(dqp); | 
|  | 1356 | /* | 
|  | 1357 | * We really can't afford to purge a dquot that is | 
|  | 1358 | * referenced, because these are hard refs. | 
|  | 1359 | * It shouldn't happen in general because we went through _all_ inodes in | 
|  | 1360 | * dqrele_all_inodes before calling this and didn't let the mountlock go. | 
|  | 1361 | * However, it is possible that we have dquots with temporary | 
|  | 1362 | * references that are not attached to an inode; see xfs_setattr() for example. | 
|  | 1363 | */ | 
|  | 1364 | if (dqp->q_nrefs != 0) { | 
|  | 1365 | xfs_dqunlock(dqp); | 
| Christoph Hellwig | c9a192d | 2009-02-09 08:47:22 +0100 | [diff] [blame] | 1366 | mutex_unlock(&dqp->q_hash->qh_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1367 | return (1); | 
|  | 1368 | } | 
|  | 1369 |  | 
| Dave Chinner | 3a8406f | 2010-04-13 15:06:52 +1000 | [diff] [blame] | 1370 | ASSERT(!list_empty(&dqp->q_freelist)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1371 |  | 
|  | 1372 | /* | 
|  | 1373 | * If we're turning off quotas, we have to make sure that, for | 
|  | 1374 | * example, we don't delete quota disk blocks while dquots are | 
|  | 1375 | * in the process of getting written to those disk blocks. | 
|  | 1376 | * This dquot might well be on the AIL, and we can't leave it there | 
|  | 1377 | * if we're turning off quotas. Basically, we need this flush | 
|  | 1378 | * lock, and are willing to block on it. | 
|  | 1379 | */ | 
| David Chinner | e1f49cf | 2008-08-13 16:41:43 +1000 | [diff] [blame] | 1380 | if (!xfs_dqflock_nowait(dqp)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1381 | /* | 
|  | 1382 | * Block on the flush lock after nudging dquot buffer, | 
|  | 1383 | * if it is incore. | 
|  | 1384 | */ | 
|  | 1385 | xfs_qm_dqflock_pushbuf_wait(dqp); | 
|  | 1386 | } | 
|  | 1387 |  | 
|  | 1388 | /* | 
|  | 1389 | * XXX If we're turning this type of quota off, we don't care | 
|  | 1390 | * about the dirty metadata sitting in this dquot. OTOH, if | 
|  | 1391 | * we're unmounting, we do care, so we flush it and wait. | 
|  | 1392 | */ | 
|  | 1393 | if (XFS_DQ_IS_DIRTY(dqp)) { | 
| David Chinner | 3c56836 | 2008-04-10 12:20:24 +1000 | [diff] [blame] | 1394 | int	error; | 
| Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 1395 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1396 | /* dqflush unlocks dqflock */ | 
|  | 1397 | /* | 
|  | 1398 | * Given that dqpurge is a very rare occurrence, it is OK | 
|  | 1399 | * that we're holding the hashlist and mplist locks | 
|  | 1400 | * across the disk write. But, ... XXXsup | 
|  | 1401 | * | 
|  | 1402 | * We don't care about getting disk errors here. We need | 
|  | 1403 | * to purge this dquot anyway, so we go ahead regardless. | 
|  | 1404 | */ | 
| Dave Chinner | 20026d9 | 2010-02-04 09:48:58 +1100 | [diff] [blame] | 1405 | error = xfs_qm_dqflush(dqp, SYNC_WAIT); | 
| David Chinner | 3c56836 | 2008-04-10 12:20:24 +1000 | [diff] [blame] | 1406 | if (error) | 
|  | 1407 | xfs_fs_cmn_err(CE_WARN, mp, | 
|  | 1408 | "xfs_qm_dqpurge: dquot %p flush failed", dqp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1409 | xfs_dqflock(dqp); | 
|  | 1410 | } | 
| Peter Leckie | bc3048e | 2008-10-30 17:05:04 +1100 | [diff] [blame] | 1411 | ASSERT(atomic_read(&dqp->q_pincount) == 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1412 | ASSERT(XFS_FORCED_SHUTDOWN(mp) || | 
|  | 1413 | !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL)); | 
|  | 1414 |  | 
| Dave Chinner | e6a81f1 | 2010-04-13 15:06:51 +1000 | [diff] [blame] | 1415 | list_del_init(&dqp->q_hashlist); | 
|  | 1416 | qh->qh_version++; | 
| Dave Chinner | 3a25404 | 2010-04-13 15:06:48 +1000 | [diff] [blame] | 1417 | list_del_init(&dqp->q_mplist); | 
|  | 1418 | mp->m_quotainfo->qi_dqreclaims++; | 
|  | 1419 | mp->m_quotainfo->qi_dquots--; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1420 | /* | 
|  | 1421 | * XXX Move this to the front of the freelist, if we can get the | 
|  | 1422 | * freelist lock. | 
|  | 1423 | */ | 
| Dave Chinner | 3a8406f | 2010-04-13 15:06:52 +1000 | [diff] [blame] | 1424 | ASSERT(!list_empty(&dqp->q_freelist)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1425 |  | 
|  | 1426 | dqp->q_mount = NULL; | 
|  | 1427 | dqp->q_hash = NULL; | 
|  | 1428 | dqp->dq_flags = XFS_DQ_INACTIVE; | 
|  | 1429 | memset(&dqp->q_core, 0, sizeof(dqp->q_core)); | 
|  | 1430 | xfs_dqfunlock(dqp); | 
|  | 1431 | xfs_dqunlock(dqp); | 
| Dave Chinner | e6a81f1 | 2010-04-13 15:06:51 +1000 | [diff] [blame] | 1432 | mutex_unlock(&qh->qh_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1433 | return (0); | 
|  | 1434 | } | 
|  | 1435 |  | 
|  | 1436 |  | 
|  | 1437 | #ifdef QUOTADEBUG | 
|  | 1438 | void | 
|  | 1439 | xfs_qm_dqprint(xfs_dquot_t *dqp) | 
|  | 1440 | { | 
|  | 1441 | cmn_err(CE_DEBUG, "-----------KERNEL DQUOT----------------"); | 
|  | 1442 | cmn_err(CE_DEBUG, "---- dquotID =  %d", | 
| Christoph Hellwig | 1149d96 | 2005-11-02 15:01:12 +1100 | [diff] [blame] | 1443 | (int)be32_to_cpu(dqp->q_core.d_id)); | 
| Nathan Scott | c8ad20f | 2005-06-21 15:38:48 +1000 | [diff] [blame] | 1444 | cmn_err(CE_DEBUG, "---- type    =  %s", DQFLAGTO_TYPESTR(dqp)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1445 | cmn_err(CE_DEBUG, "---- fs      =  0x%p", dqp->q_mount); | 
|  | 1446 | cmn_err(CE_DEBUG, "---- blkno   =  0x%x", (int) dqp->q_blkno); | 
|  | 1447 | cmn_err(CE_DEBUG, "---- boffset =  0x%x", (int) dqp->q_bufoffset); | 
|  | 1448 | cmn_err(CE_DEBUG, "---- blkhlimit =  %Lu (0x%x)", | 
| Christoph Hellwig | 1149d96 | 2005-11-02 15:01:12 +1100 | [diff] [blame] | 1449 | be64_to_cpu(dqp->q_core.d_blk_hardlimit), | 
|  | 1450 | (int)be64_to_cpu(dqp->q_core.d_blk_hardlimit)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1451 | cmn_err(CE_DEBUG, "---- blkslimit =  %Lu (0x%x)", | 
| Christoph Hellwig | 1149d96 | 2005-11-02 15:01:12 +1100 | [diff] [blame] | 1452 | be64_to_cpu(dqp->q_core.d_blk_softlimit), | 
|  | 1453 | (int)be64_to_cpu(dqp->q_core.d_blk_softlimit)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1454 | cmn_err(CE_DEBUG, "---- inohlimit =  %Lu (0x%x)", | 
| Christoph Hellwig | 1149d96 | 2005-11-02 15:01:12 +1100 | [diff] [blame] | 1455 | be64_to_cpu(dqp->q_core.d_ino_hardlimit), | 
|  | 1456 | (int)be64_to_cpu(dqp->q_core.d_ino_hardlimit)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1457 | cmn_err(CE_DEBUG, "---- inoslimit =  %Lu (0x%x)", | 
| Christoph Hellwig | 1149d96 | 2005-11-02 15:01:12 +1100 | [diff] [blame] | 1458 | be64_to_cpu(dqp->q_core.d_ino_softlimit), | 
|  | 1459 | (int)be64_to_cpu(dqp->q_core.d_ino_softlimit)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1460 | cmn_err(CE_DEBUG, "---- bcount  =  %Lu (0x%x)", | 
| Christoph Hellwig | 1149d96 | 2005-11-02 15:01:12 +1100 | [diff] [blame] | 1461 | be64_to_cpu(dqp->q_core.d_bcount), | 
|  | 1462 | (int)be64_to_cpu(dqp->q_core.d_bcount)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1463 | cmn_err(CE_DEBUG, "---- icount  =  %Lu (0x%x)", | 
| Christoph Hellwig | 1149d96 | 2005-11-02 15:01:12 +1100 | [diff] [blame] | 1464 | be64_to_cpu(dqp->q_core.d_icount), | 
|  | 1465 | (int)be64_to_cpu(dqp->q_core.d_icount)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1466 | cmn_err(CE_DEBUG, "---- btimer  =  %d", | 
| Christoph Hellwig | 1149d96 | 2005-11-02 15:01:12 +1100 | [diff] [blame] | 1467 | (int)be32_to_cpu(dqp->q_core.d_btimer)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1468 | cmn_err(CE_DEBUG, "---- itimer  =  %d", | 
| Christoph Hellwig | 1149d96 | 2005-11-02 15:01:12 +1100 | [diff] [blame] | 1469 | (int)be32_to_cpu(dqp->q_core.d_itimer)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1470 | cmn_err(CE_DEBUG, "---------------------------"); | 
|  | 1471 | } | 
|  | 1472 | #endif | 
|  | 1473 |  | 
|  | 1474 | /* | 
|  | 1475 | * Give the buffer a little push if it is incore and | 
|  | 1476 | * wait on the flush lock. | 
|  | 1477 | */ | 
|  | 1478 | void | 
|  | 1479 | xfs_qm_dqflock_pushbuf_wait( | 
|  | 1480 | xfs_dquot_t	*dqp) | 
|  | 1481 | { | 
| Christoph Hellwig | 8a7b8a8 | 2010-04-20 17:01:30 +1000 | [diff] [blame] | 1482 | xfs_mount_t	*mp = dqp->q_mount; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1483 | xfs_buf_t	*bp; | 
|  | 1484 |  | 
|  | 1485 | /* | 
|  | 1486 | * Check to see if the dquot has been flushed as a delayed | 
|  | 1487 | * write.  If so, grab its buffer and send it | 
|  | 1488 | * out immediately.  We'll be able to acquire | 
|  | 1489 | * the flush lock when the I/O completes. | 
|  | 1490 | */ | 
| Christoph Hellwig | 8a7b8a8 | 2010-04-20 17:01:30 +1000 | [diff] [blame] | 1491 | bp = xfs_incore(mp->m_ddev_targp, dqp->q_blkno, | 
|  | 1492 | mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK); | 
| Dave Chinner | 7d6a7bd | 2010-01-26 15:13:41 +1100 | [diff] [blame] | 1493 | if (!bp) | 
|  | 1494 | goto out_lock; | 
| Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 1495 |  | 
| Dave Chinner | 7d6a7bd | 2010-01-26 15:13:41 +1100 | [diff] [blame] | 1496 | if (XFS_BUF_ISDELAYWRITE(bp)) { | 
|  | 1497 | if (XFS_BUF_ISPINNED(bp)) | 
| Christoph Hellwig | 8a7b8a8 | 2010-04-20 17:01:30 +1000 | [diff] [blame] | 1498 | xfs_log_force(mp, 0); | 
| Dave Chinner | 7d6a7bd | 2010-01-26 15:13:41 +1100 | [diff] [blame] | 1499 | xfs_buf_delwri_promote(bp); | 
|  | 1500 | wake_up_process(bp->b_target->bt_task); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1501 | } | 
| Dave Chinner | 7d6a7bd | 2010-01-26 15:13:41 +1100 | [diff] [blame] | 1502 | xfs_buf_relse(bp); | 
|  | 1503 | out_lock: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1504 | xfs_dqflock(dqp); | 
|  | 1505 | } |