/*
 * Implementation of operations over global quota file
 */
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/quotaops.h>
#include <linux/dqblk_qtree.h>
#include <linux/jiffies.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>

#define MLOG_MASK_PREFIX ML_QUOTA
#include <cluster/masklog.h>

#include "ocfs2_fs.h"
#include "ocfs2.h"
#include "alloc.h"
#include "inode.h"
#include "journal.h"
#include "file.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "uptodate.h"
#include "quota.h"

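/* Workqueue running the periodic dquot sync work items (qsync_work_fn());
 * created in ocfs2_quota_setup() and destroyed in ocfs2_quota_shutdown(). */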
static struct workqueue_struct *ocfs2_quota_wq = NULL;

static void qsync_work_fn(struct work_struct *work);

static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct mem_dqblk *m = &dquot->dq_dqb;

	/* Update from disk only entries not set by the admin */
	if (!test_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags)) {
		m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
		m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
	}
	if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
		m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
	if (!test_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags)) {
		m->dqb_bhardlimit = le64_to_cpu(d->dqb_bhardlimit);
		m->dqb_bsoftlimit = le64_to_cpu(d->dqb_bsoftlimit);
	}
	if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
		m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
	if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags))
		m->dqb_btime = le64_to_cpu(d->dqb_btime);
	if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags))
		m->dqb_itime = le64_to_cpu(d->dqb_itime);
	OCFS2_DQUOT(dquot)->dq_use_count = le32_to_cpu(d->dqb_use_count);
}

static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct mem_dqblk *m = &dquot->dq_dqb;

	d->dqb_id = cpu_to_le32(dquot->dq_id);
	d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count);
	d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
	d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
	d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
	d->dqb_bhardlimit = cpu_to_le64(m->dqb_bhardlimit);
	d->dqb_bsoftlimit = cpu_to_le64(m->dqb_bsoftlimit);
	d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
	d->dqb_btime = cpu_to_le64(m->dqb_btime);
	d->dqb_itime = cpu_to_le64(m->dqb_itime);
}

static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

	if (qtree_entry_unused(&oinfo->dqi_gi, dp))
		return 0;
	return le32_to_cpu(d->dqb_id) == dquot->dq_id;
}

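/* Format callbacks plugged into the generic quota tree code through
 * oinfo->dqi_gi.dqi_ops (set up in ocfs2_global_read_info()); they convert
 * between struct dquot and the on-disk struct ocfs2_global_disk_dqblk
 * entries when qtree_read_dquot()/qtree_write_dquot() walk the global
 * quota file. */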
struct qtree_fmt_operations ocfs2_global_ops = {
	.mem2disk_dqblk = ocfs2_global_mem2diskdqb,
	.disk2mem_dqblk = ocfs2_global_disk2memdqb,
	.is_id = ocfs2_global_is_id,
};

struct buffer_head *ocfs2_read_quota_block(struct inode *inode,
					   int block, int *err)
{
	struct buffer_head *tmp = NULL;

	*err = ocfs2_read_virt_blocks(inode, block, 1, &tmp, 0, NULL);
	if (*err)
		mlog_errno(*err);

	return tmp;
}

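/* Unlike ocfs2_read_quota_block() above, this helper only maps the logical
 * block to its physical location and returns the buffer head without reading
 * it from disk; it is used by ocfs2_quota_write() when the whole block is
 * about to be (re)written anyway. */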
static struct buffer_head *ocfs2_get_quota_block(struct inode *inode,
						 int block, int *err)
{
	u64 pblock, pcount;
	struct buffer_head *bh;

	down_read(&OCFS2_I(inode)->ip_alloc_sem);
	*err = ocfs2_extent_map_get_blocks(inode, block, &pblock, &pcount,
					   NULL);
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
	if (*err) {
		mlog_errno(*err);
		return NULL;
	}
	bh = sb_getblk(inode->i_sb, pblock);
	if (!bh) {
		*err = -EIO;
		mlog_errno(*err);
	}
	return bh;
}

/* Read data from the global quota file - avoid the page cache and such
 * because we cannot afford to acquire the locks it would need... We use the
 * quota cluster lock to serialize operations; the caller is responsible for
 * acquiring it. */
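/* ocfs2_quota_read() and ocfs2_quota_write() below are also reached through
 * sb->s_op->quota_read/->quota_write elsewhere in this file (see
 * ocfs2_global_read_info(), __ocfs2_global_write_info() and
 * __ocfs2_sync_dquot()). */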
ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
			 size_t len, loff_t off)
{
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	struct inode *gqinode = oinfo->dqi_gqinode;
	loff_t i_size = i_size_read(gqinode);
	int offset = off & (sb->s_blocksize - 1);
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	struct buffer_head *bh;
	size_t toread, tocopy;

	if (off > i_size)
		return 0;
	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min((size_t)(sb->s_blocksize - offset), toread);
		bh = ocfs2_read_quota_block(gqinode, blk, &err);
		if (!bh) {
			mlog_errno(err);
			return err;
		}
		memcpy(data, bh->b_data + offset, tocopy);
		brelse(bh);
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
ssize_t ocfs2_quota_write(struct super_block *sb, int type,
			  const char *data, size_t len, loff_t off)
{
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	struct inode *gqinode = oinfo->dqi_gqinode;
	int offset = off & (sb->s_blocksize - 1);
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0, new = 0;
	struct buffer_head *bh;
	handle_t *handle = journal_current_handle();

	if (!handle) {
		mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
		     "because transaction was not started.\n",
		     (unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
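	/* Writes must stay within the usable part of a quota block; the last
	 * OCFS2_QBLK_RESERVED_SPACE bytes of each block are reserved (see
	 * dqi_usable_bs in ocfs2_global_read_info()), so clamp the length. */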
	if (len > sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset) {
		WARN_ON(1);
		len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
	}

	mutex_lock_nested(&gqinode->i_mutex, I_MUTEX_QUOTA);
	if (gqinode->i_size < off + len) {
		down_write(&OCFS2_I(gqinode)->ip_alloc_sem);
		err = ocfs2_extend_no_holes(gqinode, off + len, off);
		up_write(&OCFS2_I(gqinode)->ip_alloc_sem);
		if (err < 0)
			goto out;
		err = ocfs2_simple_size_update(gqinode,
					       oinfo->dqi_gqi_bh,
					       off + len);
		if (err < 0)
			goto out;
		new = 1;
	}
	/* Not rewriting whole block? */
	if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
	    !new) {
		bh = ocfs2_read_quota_block(gqinode, blk, &err);
		if (!bh) {
			mlog_errno(err);
			/* Drop gqinode->i_mutex via the common exit path */
			goto out;
		}
		err = ocfs2_journal_access(handle, gqinode, bh,
					   OCFS2_JOURNAL_ACCESS_WRITE);
	} else {
		bh = ocfs2_get_quota_block(gqinode, blk, &err);
		if (!bh) {
			mlog_errno(err);
			/* Drop gqinode->i_mutex via the common exit path */
			goto out;
		}
		err = ocfs2_journal_access(handle, gqinode, bh,
					   OCFS2_JOURNAL_ACCESS_CREATE);
	}
	if (err < 0) {
		brelse(bh);
		goto out;
	}
	lock_buffer(bh);
	if (new)
		memset(bh->b_data, 0, sb->s_blocksize);
	memcpy(bh->b_data + offset, data, len);
	flush_dcache_page(bh->b_page);
	unlock_buffer(bh);
	ocfs2_set_buffer_uptodate(gqinode, bh);
	err = ocfs2_journal_dirty(handle, bh);
	brelse(bh);
	if (err < 0)
		goto out;
out:
	if (err) {
		mutex_unlock(&gqinode->i_mutex);
		mlog_errno(err);
		return err;
	}
	gqinode->i_version++;
	ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
	mutex_unlock(&gqinode->i_mutex);
	return len;
}

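/* ocfs2_lock_global_qf()/ocfs2_unlock_global_qf() take and drop the cluster
 * lock on the global quota file inode. dqi_gqi_count counts the holders so
 * that nested users share the single cached inode buffer head dqi_gqi_bh. */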
int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	int status;
	struct buffer_head *bh = NULL;

	status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex);
	if (status < 0)
		return status;
	spin_lock(&dq_data_lock);
	if (!oinfo->dqi_gqi_count++)
		oinfo->dqi_gqi_bh = bh;
	else
		WARN_ON(bh != oinfo->dqi_gqi_bh);
	spin_unlock(&dq_data_lock);
	return 0;
}

void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
	brelse(oinfo->dqi_gqi_bh);
	spin_lock(&dq_data_lock);
	if (!--oinfo->dqi_gqi_count)
		oinfo->dqi_gqi_bh = NULL;
	spin_unlock(&dq_data_lock);
}

/* Read information header from global quota file */
int ocfs2_global_read_info(struct super_block *sb, int type)
{
	struct inode *gqinode = NULL;
	unsigned int ino[MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
					GROUP_QUOTA_SYSTEM_INODE };
	struct ocfs2_global_disk_dqinfo dinfo;
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	int status;

	mlog_entry_void();

	/* Read global header */
	gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
					      OCFS2_INVALID_SLOT);
	if (!gqinode) {
		mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
		     type);
		status = -EINVAL;
		goto out_err;
	}
	oinfo->dqi_gi.dqi_sb = sb;
	oinfo->dqi_gi.dqi_type = type;
	ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
	oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
	oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
	oinfo->dqi_gqi_bh = NULL;
	oinfo->dqi_gqi_count = 0;
	oinfo->dqi_gqinode = gqinode;
	status = ocfs2_lock_global_qf(oinfo, 0);
	if (status < 0) {
		mlog_errno(status);
		goto out_err;
	}
	status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
				      sizeof(struct ocfs2_global_disk_dqinfo),
				      OCFS2_GLOBAL_INFO_OFF);
	ocfs2_unlock_global_qf(oinfo, 0);
	if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
		mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
		     status);
		if (status >= 0)
			status = -EIO;
		mlog_errno(status);
		goto out_err;
	}
	info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
	info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
	oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms);
	oinfo->dqi_syncjiff = msecs_to_jiffies(oinfo->dqi_syncms);
	oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
	oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
	oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
	oinfo->dqi_gi.dqi_blocksize_bits = sb->s_blocksize_bits;
	oinfo->dqi_gi.dqi_usable_bs = sb->s_blocksize -
					OCFS2_QBLK_RESERVED_SPACE;
	oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
	INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
	queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
			   oinfo->dqi_syncjiff);

out_err:
	mlog_exit(status);
	return status;
}

/* Write information to global quota file. Expects exclusive lock on quota
 * file inode and quota info */
static int __ocfs2_global_write_info(struct super_block *sb, int type)
{
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	struct ocfs2_global_disk_dqinfo dinfo;
	ssize_t size;

	spin_lock(&dq_data_lock);
	info->dqi_flags &= ~DQF_INFO_DIRTY;
	dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
	dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
	spin_unlock(&dq_data_lock);
	dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms);
	dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks);
	dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk);
	dinfo.dqi_free_entry = cpu_to_le32(oinfo->dqi_gi.dqi_free_entry);
	size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
				     sizeof(struct ocfs2_global_disk_dqinfo),
				     OCFS2_GLOBAL_INFO_OFF);
	if (size != sizeof(struct ocfs2_global_disk_dqinfo)) {
		mlog(ML_ERROR, "Cannot write global quota info structure\n");
		if (size >= 0)
			size = -EIO;
		return size;
	}
	return 0;
}

int ocfs2_global_write_info(struct super_block *sb, int type)
{
	int err;
	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;

	err = ocfs2_qinfo_lock(info, 1);
	if (err < 0)
		return err;
	err = __ocfs2_global_write_info(sb, type);
	ocfs2_qinfo_unlock(info, 1);
	return err;
}

/* Read in information from global quota file and acquire a reference to it.
 * dquot_acquire() has already started the transaction and locked quota file */
int ocfs2_global_read_dquot(struct dquot *dquot)
{
	int err, err2, ex = 0;
	struct ocfs2_mem_dqinfo *info =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

	err = ocfs2_qinfo_lock(info, 0);
	if (err < 0)
		goto out;
	err = qtree_read_dquot(&info->dqi_gi, dquot);
	if (err < 0)
		goto out_qlock;
	OCFS2_DQUOT(dquot)->dq_use_count++;
	OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
	OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
	if (!dquot->dq_off) {	/* No real quota entry? */
		/* Upgrade to exclusive lock for allocation */
		err = ocfs2_qinfo_lock(info, 1);
		if (err < 0)
			goto out_qlock;
		ex = 1;
	}
	err = qtree_write_dquot(&info->dqi_gi, dquot);
	if (ex && info_dirty(sb_dqinfo(dquot->dq_sb, dquot->dq_type))) {
		err2 = __ocfs2_global_write_info(dquot->dq_sb, dquot->dq_type);
		if (!err)
			err = err2;
	}
out_qlock:
	if (ex)
		ocfs2_qinfo_unlock(info, 1);
	ocfs2_qinfo_unlock(info, 0);
out:
	if (err < 0)
		mlog_errno(err);
	return err;
}

/* Sync local information about quota modifications with global quota file.
 * Caller must have started the transaction and obtained exclusive lock for
 * global quota file inode */
int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
{
	int err, err2;
	struct super_block *sb = dquot->dq_sb;
	int type = dquot->dq_type;
	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
	struct ocfs2_global_disk_dqblk dqblk;
	s64 spacechange, inodechange;
	time_t olditime, oldbtime;

	err = sb->s_op->quota_read(sb, type, (char *)&dqblk,
				   sizeof(struct ocfs2_global_disk_dqblk),
				   dquot->dq_off);
	if (err != sizeof(struct ocfs2_global_disk_dqblk)) {
		if (err >= 0) {
			mlog(ML_ERROR, "Short read from global quota file "
			     "(%u read)\n", err);
			err = -EIO;
		}
		goto out;
	}

	/* Update space and inode usage. Also pick up the other fields from
	 * the global quota file so that we don't overwrite any changes made
	 * there. */
	spin_lock(&dq_data_lock);
	spacechange = dquot->dq_dqb.dqb_curspace -
					OCFS2_DQUOT(dquot)->dq_origspace;
	inodechange = dquot->dq_dqb.dqb_curinodes -
					OCFS2_DQUOT(dquot)->dq_originodes;
	olditime = dquot->dq_dqb.dqb_itime;
	oldbtime = dquot->dq_dqb.dqb_btime;
	ocfs2_global_disk2memdqb(dquot, &dqblk);
	mlog(0, "Syncing global dquot %d space %lld+%lld, inodes %lld+%lld\n",
	     dquot->dq_id, dquot->dq_dqb.dqb_curspace, spacechange,
	     dquot->dq_dqb.dqb_curinodes, inodechange);
	if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
		dquot->dq_dqb.dqb_curspace += spacechange;
	if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
		dquot->dq_dqb.dqb_curinodes += inodechange;
	/* Set the space grace time properly... */
	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_curspace > dquot->dq_dqb.dqb_bsoftlimit) {
		if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags) &&
		    oldbtime > 0) {
			if (dquot->dq_dqb.dqb_btime > 0)
				dquot->dq_dqb.dqb_btime =
					min(dquot->dq_dqb.dqb_btime, oldbtime);
			else
				dquot->dq_dqb.dqb_btime = oldbtime;
		}
	} else {
		dquot->dq_dqb.dqb_btime = 0;
		clear_bit(DQ_BLKS_B, &dquot->dq_flags);
	}
	/* Set the inode grace time properly... */
	if (dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_curinodes > dquot->dq_dqb.dqb_isoftlimit) {
		if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags) &&
		    olditime > 0) {
			if (dquot->dq_dqb.dqb_itime > 0)
				dquot->dq_dqb.dqb_itime =
					min(dquot->dq_dqb.dqb_itime, olditime);
			else
				dquot->dq_dqb.dqb_itime = olditime;
		}
	} else {
		dquot->dq_dqb.dqb_itime = 0;
		clear_bit(DQ_INODES_B, &dquot->dq_flags);
	}
	/* All information is properly updated, clear the flags */
	__clear_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
	OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
	spin_unlock(&dq_data_lock);
	err = ocfs2_qinfo_lock(info, freeing);
	if (err < 0) {
		mlog(ML_ERROR, "Failed to lock quota info, losing quota write"
		     " (type=%d, id=%u)\n", dquot->dq_type,
		     (unsigned)dquot->dq_id);
		goto out;
	}
	if (freeing)
		OCFS2_DQUOT(dquot)->dq_use_count--;
	err = qtree_write_dquot(&info->dqi_gi, dquot);
	if (err < 0)
		goto out_qlock;
	if (freeing && !OCFS2_DQUOT(dquot)->dq_use_count) {
		err = qtree_release_dquot(&info->dqi_gi, dquot);
		if (info_dirty(sb_dqinfo(sb, type))) {
			err2 = __ocfs2_global_write_info(sb, type);
			if (!err)
				err = err2;
		}
	}
out_qlock:
	ocfs2_qinfo_unlock(info, freeing);
out:
	if (err < 0)
		mlog_errno(err);
	return err;
}

/*
 * Functions for periodic syncing of dquots with global file
 */
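/* Invoked from dquot_scan_active() in qsync_work_fn() below for each active
 * dquot; dquots of a different quota type than the one being synced are
 * skipped, the rest are written to the global and local quota files under a
 * transaction. */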
static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
{
	handle_t *handle;
	struct super_block *sb = dquot->dq_sb;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	int status = 0;

	mlog_entry("id=%u qtype=%u type=%lu device=%s\n", dquot->dq_id,
		   dquot->dq_type, type, sb->s_id);
	if (type != dquot->dq_type)
		goto out;
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;

	handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	mutex_lock(&sb_dqopt(sb)->dqio_mutex);
	status = ocfs2_sync_dquot(dquot);
	mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
	if (status < 0)
		mlog_errno(status);
	/* We have to write local structure as well... */
	dquot_mark_dquot_dirty(dquot);
	status = dquot_commit(dquot);
	if (status < 0)
		mlog_errno(status);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}

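/* Delayed work item that periodically pushes all active dquots of this quota
 * type to the global file and then re-arms itself; it is first queued in
 * ocfs2_global_read_info() with a period of dqi_syncjiff. */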
static void qsync_work_fn(struct work_struct *work)
{
	struct ocfs2_mem_dqinfo *oinfo = container_of(work,
						      struct ocfs2_mem_dqinfo,
						      dqi_sync_work.work);
	struct super_block *sb = oinfo->dqi_gqinode->i_sb;

	dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
	queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
			   oinfo->dqi_syncjiff);
}

/*
 * Wrappers for generic quota functions
 */

static int ocfs2_write_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

	handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out;
	}
	status = dquot_commit(dquot);
	ocfs2_commit_trans(osb, handle);
out:
	mlog_exit(status);
	return status;
}

int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
{
	struct ocfs2_mem_dqinfo *oinfo;
	int features[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
				    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA };

	if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, features[type]))
		return 0;

	oinfo = sb_dqinfo(sb, type)->dqi_priv;
	/* We modify tree, leaf block, global info, local chunk header,
	 * global and local inode */
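	/* Roughly: dqi_qtree_depth buffers for the tree path, plus a leaf
	 * block, the global info block and the local chunk header (the
	 * "+ 2 + 1" below), plus an inode update for each of the global and
	 * local quota inodes. */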
	return oinfo->dqi_gi.dqi_qtree_depth + 2 + 1 +
	       2 * OCFS2_INODE_UPDATE_CREDITS;
}

static int ocfs2_release_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(osb,
		ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_type));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = dquot_release(dquot);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}

int ocfs2_calc_qinit_credits(struct super_block *sb, int type)
{
	struct ocfs2_mem_dqinfo *oinfo;
	int features[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
				    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA };
	struct ocfs2_dinode *lfe, *gfe;

	if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, features[type]))
		return 0;

	oinfo = sb_dqinfo(sb, type)->dqi_priv;
	gfe = (struct ocfs2_dinode *)oinfo->dqi_gqi_bh->b_data;
	lfe = (struct ocfs2_dinode *)oinfo->dqi_lqi_bh->b_data;
	/* We can extend local file + global file. In local file we
	 * can modify info, chunk header block and dquot block. In
	 * global file we can modify info, tree and leaf block */
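	/* Roughly: one ocfs2_calc_extend_credits() term per file for adding
	 * a block, "3" for the local info, chunk header and dquot blocks,
	 * and "dqi_gi.dqi_qtree_depth + 2" for the global tree path, leaf
	 * and info blocks. */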
	return ocfs2_calc_extend_credits(sb, &lfe->id2.i_list, 0) +
	       ocfs2_calc_extend_credits(sb, &gfe->id2.i_list, 0) +
	       3 + oinfo->dqi_gi.dqi_qtree_depth + 2;
}

static int ocfs2_acquire_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
	/* We need an exclusive lock, because we're going to update the use
	 * count and possibly instantiate a new dquot structure */
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(osb,
		ocfs2_calc_qinit_credits(dquot->dq_sb, dquot->dq_type));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = dquot_acquire(dquot);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}

static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
{
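	/* Fields the admin has set since the last sync; if any of these bits
	 * is set below, the dquot is synced to the global file right away
	 * instead of only being written to the local quota file. */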
	unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) |
			     (1 << (DQ_LASTSET_B + QIF_BLIMITS_B)) |
			     (1 << (DQ_LASTSET_B + QIF_INODES_B)) |
			     (1 << (DQ_LASTSET_B + QIF_SPACE_B)) |
			     (1 << (DQ_LASTSET_B + QIF_BTIME_B)) |
			     (1 << (DQ_LASTSET_B + QIF_ITIME_B));
	int sync = 0;
	int status;
	struct super_block *sb = dquot->dq_sb;
	int type = dquot->dq_type;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(sb);

	mlog_entry("id=%u, type=%d", dquot->dq_id, type);
	dquot_mark_dquot_dirty(dquot);

	/* In case user set some limits, sync dquot immediately to global
	 * quota file so that information propagates quicker */
	spin_lock(&dq_data_lock);
	if (dquot->dq_flags & mask)
		sync = 1;
	spin_unlock(&dq_data_lock);
	if (!sync) {
		status = ocfs2_write_dquot(dquot);
		goto out;
	}
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = ocfs2_sync_dquot(dquot);
	if (status < 0) {
		mlog_errno(status);
		goto out_trans;
	}
	/* Now write updated local dquot structure */
	status = dquot_commit(dquot);
out_trans:
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}

/* This should happen only after set_dqinfo(). */
static int ocfs2_write_info(struct super_block *sb, int type)
{
	handle_t *handle;
	int status = 0;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

	mlog_entry_void();

	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(OCFS2_SB(sb), OCFS2_QINFO_WRITE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = dquot_commit_info(sb, type);
	ocfs2_commit_trans(OCFS2_SB(sb), handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}

/* This is difficult. We have to lock the quota inode and start a transaction
 * in this function, but we don't want to pay the penalty of the exclusive
 * quota file lock when we are just going to use cached structures. So we
 * take the read lock, check whether we have the dquot cached, and if so we
 * don't have to take the write lock... */
static int ocfs2_dquot_initialize(struct inode *inode, int type)
{
	handle_t *handle = NULL;
	int status = 0;
	struct super_block *sb = inode->i_sb;
	struct ocfs2_mem_dqinfo *oinfo;
	int exclusive = 0;
	int cnt;
	qid_t id;

	mlog_entry_void();

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
		status = ocfs2_lock_global_qf(oinfo, 0);
		if (status < 0)
			goto out;
		/* This is just a performance optimization, not a reliable
		 * test. Since we hold an inode lock, no one can actually
		 * release the structure until we are finished with
		 * initialization. */
		if (inode->i_dquot[cnt] != NODQUOT) {
			ocfs2_unlock_global_qf(oinfo, 0);
			continue;
		}
		/* While we hold the inode lock, we know that no
		 * dquot_release() can run and thus we can safely check
		 * whether we need to read+modify the global file to get the
		 * quota information or whether our node already has it. */
		if (cnt == USRQUOTA)
			id = inode->i_uid;
		else if (cnt == GRPQUOTA)
			id = inode->i_gid;
		else
			BUG();
		/* Obtain exclusion from quota off... */
		down_write(&sb_dqopt(sb)->dqptr_sem);
		exclusive = !dquot_is_cached(sb, id, cnt);
		up_write(&sb_dqopt(sb)->dqptr_sem);
		if (exclusive) {
			status = ocfs2_lock_global_qf(oinfo, 1);
			if (status < 0) {
				exclusive = 0;
				mlog_errno(status);
				goto out_ilock;
			}
			handle = ocfs2_start_trans(OCFS2_SB(sb),
					ocfs2_calc_qinit_credits(sb, cnt));
			if (IS_ERR(handle)) {
				status = PTR_ERR(handle);
				mlog_errno(status);
				goto out_ilock;
			}
		}
		dquot_initialize(inode, cnt);
		if (exclusive) {
			ocfs2_commit_trans(OCFS2_SB(sb), handle);
			ocfs2_unlock_global_qf(oinfo, 1);
		}
		ocfs2_unlock_global_qf(oinfo, 0);
	}
	mlog_exit(0);
	return 0;
out_ilock:
	if (exclusive)
		ocfs2_unlock_global_qf(oinfo, 1);
	ocfs2_unlock_global_qf(oinfo, 0);
out:
	mlog_exit(status);
	return status;
}

static int ocfs2_dquot_drop_slow(struct inode *inode)
{
	int status = 0;
	int cnt;
	int got_lock[MAXQUOTAS] = {0, 0};
	handle_t *handle;
	struct super_block *sb = inode->i_sb;
	struct ocfs2_mem_dqinfo *oinfo;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!sb_has_quota_active(sb, cnt))
			continue;
		oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
		status = ocfs2_lock_global_qf(oinfo, 1);
		if (status < 0)
			goto out;
		got_lock[cnt] = 1;
	}
	handle = ocfs2_start_trans(OCFS2_SB(sb),
			ocfs2_calc_qinit_credits(sb, USRQUOTA) +
			ocfs2_calc_qinit_credits(sb, GRPQUOTA));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out;
	}
	dquot_drop(inode);
	ocfs2_commit_trans(OCFS2_SB(sb), handle);
out:
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (got_lock[cnt]) {
			oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
			ocfs2_unlock_global_qf(oinfo, 1);
		}
	return status;
}

/* See the comment before ocfs2_dquot_initialize. */
static int ocfs2_dquot_drop(struct inode *inode)
{
	int status = 0;
	struct super_block *sb = inode->i_sb;
	struct ocfs2_mem_dqinfo *oinfo;
	int exclusive = 0;
	int cnt;
	int got_lock[MAXQUOTAS] = {0, 0};

	mlog_entry_void();
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!sb_has_quota_active(sb, cnt))
			continue;
		oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
		status = ocfs2_lock_global_qf(oinfo, 0);
		if (status < 0)
			goto out;
		got_lock[cnt] = 1;
	}
	/* Lock against anyone releasing references so that when we check
	 * we know we are not going to be the last ones to release the dquot */
	down_write(&sb_dqopt(sb)->dqptr_sem);
	/* Urgh, this is a terrible hack :( */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt] != NODQUOT &&
		    atomic_read(&inode->i_dquot[cnt]->dq_count) > 1) {
			exclusive = 1;
			break;
		}
	}
	if (!exclusive)
		dquot_drop_locked(inode);
	up_write(&sb_dqopt(sb)->dqptr_sem);
out:
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (got_lock[cnt]) {
			oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
			ocfs2_unlock_global_qf(oinfo, 0);
		}
	/* In case we bailed out above because expensive locking was needed,
	 * do it now... */
	if (exclusive)
		status = ocfs2_dquot_drop_slow(inode);
	mlog_exit(status);
	return status;
}

static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type)
{
	struct ocfs2_dquot *dquot =
			kmem_cache_zalloc(ocfs2_dquot_cachep, GFP_NOFS);

	if (!dquot)
		return NULL;
	return &dquot->dq_dquot;
}

static void ocfs2_destroy_dquot(struct dquot *dquot)
{
	kmem_cache_free(ocfs2_dquot_cachep, dquot);
}
struct dquot_operations ocfs2_quota_operations = {
	.initialize	= ocfs2_dquot_initialize,
	.drop		= ocfs2_dquot_drop,
	.alloc_space	= dquot_alloc_space,
	.alloc_inode	= dquot_alloc_inode,
	.free_space	= dquot_free_space,
	.free_inode	= dquot_free_inode,
	.transfer	= dquot_transfer,
	.write_dquot	= ocfs2_write_dquot,
	.acquire_dquot	= ocfs2_acquire_dquot,
	.release_dquot	= ocfs2_release_dquot,
	.mark_dirty	= ocfs2_mark_dquot_dirty,
	.write_info	= ocfs2_write_info,
	.alloc_dquot	= ocfs2_alloc_dquot,
	.destroy_dquot	= ocfs2_destroy_dquot,
};

int ocfs2_quota_setup(void)
{
	ocfs2_quota_wq = create_workqueue("o2quot");
	if (!ocfs2_quota_wq)
		return -ENOMEM;
	return 0;
}

void ocfs2_quota_shutdown(void)
{
	if (ocfs2_quota_wq) {
		flush_workqueue(ocfs2_quota_wq);
		destroy_workqueue(ocfs2_quota_wq);
		ocfs2_quota_wq = NULL;
	}
}