/*
 * segment.c - NILFS segment constructor.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/pagevec.h>
#include "nilfs.h"
#include "btnode.h"
#include "page.h"
#include "segment.h"
#include "sufile.h"
#include "cpfile.h"
#include "ifile.h"
#include "seglist.h"
#include "segbuf.h"


/*
 * Segment constructor
 */
#define SC_N_INODEVEC	16	/* Size of locally allocated inode vector */

#define SC_MAX_SEGDELTA 64	/* Upper limit of the number of segments
				   appended in collection retry loop */

/* Construction mode */
enum {
	SC_LSEG_SR = 1,	/* Make a logical segment having a super root */
	SC_LSEG_DSYNC,	/* Flush data blocks of a given file and make
			   a logical segment without a super root */
	SC_FLUSH_FILE,	/* Flush data files, leads to segment writes without
			   creating a checkpoint */
	SC_FLUSH_DAT,	/* Flush DAT file.  This also creates segments without
			   a checkpoint */
};

/* Stage numbers of dirty block collection */
enum {
	NILFS_ST_INIT = 0,
	NILFS_ST_GC,		/* Collecting dirty blocks for GC */
	NILFS_ST_FILE,
	NILFS_ST_SKETCH,
	NILFS_ST_IFILE,
	NILFS_ST_CPFILE,
	NILFS_ST_SUFILE,
	NILFS_ST_DAT,
	NILFS_ST_SR,		/* Super root */
	NILFS_ST_DSYNC,		/* Data sync blocks */
	NILFS_ST_DONE,
};

/* State flags of collection */
#define NILFS_CF_NODE		0x0001	/* Collecting node blocks */
#define NILFS_CF_IFILE_STARTED	0x0002	/* IFILE stage has started */
#define NILFS_CF_HISTORY_MASK	(NILFS_CF_IFILE_STARTED)

/* Operations depending on the construction mode and file type */
struct nilfs_sc_operations {
	int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	void (*write_data_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
	void (*write_node_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
};

/*
 * Other definitions
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
static void nilfs_dispose_list(struct nilfs_sb_info *, struct list_head *,
			       int);

#define nilfs_cnt32_gt(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) &&		\
	 ((__s32)(b) - (__s32)(a) < 0))
#define nilfs_cnt32_ge(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) &&		\
	 ((__s32)(a) - (__s32)(b) >= 0))
#define nilfs_cnt32_lt(a, b)  nilfs_cnt32_gt(b, a)
#define nilfs_cnt32_le(a, b)  nilfs_cnt32_ge(b, a)
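
/*
 * Illustrative sketch (not part of the constructor logic): these
 * comparisons stay correct across 32-bit wraparound because they are
 * evaluated on the signed difference.  For two hypothetical sequence
 * numbers
 *
 *	__u32 old_seq = 0xfffffff0, new_seq = 0x10;
 *
 * nilfs_cnt32_gt(new_seq, old_seq) is true even though new_seq is
 * numerically smaller, since (__s32)old_seq - (__s32)new_seq < 0.
 */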

/*
 * Transaction
 */
static struct kmem_cache *nilfs_transaction_cachep;

/**
 * nilfs_init_transaction_cache - create a cache for nilfs_transaction_info
 *
 * nilfs_init_transaction_cache() creates a slab cache for the struct
 * nilfs_transaction_info.
 *
 * Return Value: On success, it returns 0. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_init_transaction_cache(void)
{
	nilfs_transaction_cachep =
		kmem_cache_create("nilfs2_transaction_cache",
				  sizeof(struct nilfs_transaction_info),
				  0, SLAB_RECLAIM_ACCOUNT, NULL);
	return (nilfs_transaction_cachep == NULL) ? -ENOMEM : 0;
}

/**
 * nilfs_destroy_transaction_cache - destroy the cache for transaction info
 *
 * nilfs_destroy_transaction_cache() frees the slab cache for the struct
 * nilfs_transaction_info.
 */
void nilfs_destroy_transaction_cache(void)
{
	kmem_cache_destroy(nilfs_transaction_cachep);
}

static int nilfs_prepare_segment_lock(struct nilfs_transaction_info *ti)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	void *save = NULL;

	if (cur_ti) {
		if (cur_ti->ti_magic == NILFS_TI_MAGIC)
			return ++cur_ti->ti_count;
		else {
			/*
			 * If journal_info field is occupied by other FS,
			 * it is saved and will be restored on
			 * nilfs_transaction_commit().
			 */
			printk(KERN_WARNING
			       "NILFS warning: journal info from a different "
			       "FS\n");
			save = current->journal_info;
		}
	}
	if (!ti) {
		ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
		if (!ti)
			return -ENOMEM;
		ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
	} else {
		ti->ti_flags = 0;
	}
	ti->ti_count = 0;
	ti->ti_save = save;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;
	return 0;
}

/**
 * nilfs_transaction_begin - start indivisible file operations.
 * @sb: super block
 * @ti: nilfs_transaction_info
 * @vacancy_check: flags for vacancy rate checks
 *
 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
 * the segment semaphore, to make segment construction and write tasks
 * exclusive. The function is used in pairs with nilfs_transaction_commit().
 * The region enclosed by these two functions can be nested. To avoid a
 * deadlock, the semaphore is only acquired or released in the outermost call.
 *
 * This function allocates a nilfs_transaction_info struct to keep context
 * information on it. It is initialized and hooked onto the current task in
 * the outermost call. If a pre-allocated struct is given to @ti, it is used
 * instead; otherwise a new struct is assigned from a slab.
 *
 * When the @vacancy_check flag is set, this function will check the amount of
 * free space, and will wait for the GC to reclaim disk space if low capacity.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-ENOSPC - No space left on device
 */
int nilfs_transaction_begin(struct super_block *sb,
			    struct nilfs_transaction_info *ti,
			    int vacancy_check)
{
	struct nilfs_sb_info *sbi;
	struct the_nilfs *nilfs;
	int ret = nilfs_prepare_segment_lock(ti);

	if (unlikely(ret < 0))
		return ret;
	if (ret > 0)
		return 0;

	sbi = NILFS_SB(sb);
	nilfs = sbi->s_nilfs;
	down_read(&nilfs->ns_segctor_sem);
	if (vacancy_check && nilfs_near_disk_full(nilfs)) {
		up_read(&nilfs->ns_segctor_sem);
		ret = -ENOSPC;
		goto failed;
	}
	return 0;

 failed:
	ti = current->journal_info;
	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	return ret;
}

/**
 * nilfs_transaction_commit - commit indivisible file operations.
 * @sb: super block
 *
 * nilfs_transaction_commit() releases the read semaphore which is
 * acquired by nilfs_transaction_begin(). This is only performed
 * in the outermost call of this function. If a commit flag is set,
 * nilfs_transaction_commit() sets a timer to start the segment
 * constructor. If a sync flag is set, it starts construction
 * directly.
 */
int nilfs_transaction_commit(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct nilfs_sb_info *sbi;
	struct nilfs_sc_info *sci;
	int err = 0;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	ti->ti_flags |= NILFS_TI_COMMIT;
	if (ti->ti_count > 0) {
		ti->ti_count--;
		return 0;
	}
	sbi = NILFS_SB(sb);
	sci = NILFS_SC(sbi);
	if (sci != NULL) {
		if (ti->ti_flags & NILFS_TI_COMMIT)
			nilfs_segctor_start_timer(sci);
		if (atomic_read(&sbi->s_nilfs->ns_ndirtyblks) >
		    sci->sc_watermark)
			nilfs_segctor_do_flush(sci, 0);
	}
	up_read(&sbi->s_nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;

	if (ti->ti_flags & NILFS_TI_SYNC)
		err = nilfs_construct_segment(sb);
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	return err;
}

void nilfs_transaction_abort(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	if (ti->ti_count > 0) {
		ti->ti_count--;
		return;
	}
	up_read(&NILFS_SB(sb)->s_nilfs->ns_segctor_sem);

	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
}
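
/*
 * Minimal usage sketch of the transaction API above (illustrative only;
 * do_file_operation() is a hypothetical caller-side helper):
 *
 *	struct nilfs_transaction_info ti;
 *	int err;
 *
 *	err = nilfs_transaction_begin(sb, &ti, 1);
 *	if (err)
 *		return err;
 *	err = do_file_operation(inode);
 *	if (err) {
 *		nilfs_transaction_abort(sb);
 *		return err;
 *	}
 *	return nilfs_transaction_commit(sb);
 *
 * Nested begin/commit pairs only bump ti_count; the segment semaphore is
 * taken and released in the outermost pair.
 */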

void nilfs_relax_pressure_in_lock(struct super_block *sb)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);
	struct the_nilfs *nilfs = sbi->s_nilfs;

	if (!sci || !sci->sc_flush_request)
		return;

	set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
	up_read(&nilfs->ns_segctor_sem);

	down_write(&nilfs->ns_segctor_sem);
	if (sci->sc_flush_request &&
	    test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
		struct nilfs_transaction_info *ti = current->journal_info;

		ti->ti_flags |= NILFS_TI_WRITER;
		nilfs_segctor_do_immediate_flush(sci);
		ti->ti_flags &= ~NILFS_TI_WRITER;
	}
	downgrade_write(&nilfs->ns_segctor_sem);
}

static void nilfs_transaction_lock(struct nilfs_sb_info *sbi,
				   struct nilfs_transaction_info *ti,
				   int gcflag)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;

	BUG_ON(cur_ti);
	BUG_ON(!ti);
	ti->ti_flags = NILFS_TI_WRITER;
	ti->ti_count = 0;
	ti->ti_save = cur_ti;
	ti->ti_magic = NILFS_TI_MAGIC;
	INIT_LIST_HEAD(&ti->ti_garbage);
	current->journal_info = ti;

	for (;;) {
		down_write(&sbi->s_nilfs->ns_segctor_sem);
		if (!test_bit(NILFS_SC_PRIOR_FLUSH, &NILFS_SC(sbi)->sc_flags))
			break;

		nilfs_segctor_do_immediate_flush(NILFS_SC(sbi));

		up_write(&sbi->s_nilfs->ns_segctor_sem);
		yield();
	}
	if (gcflag)
		ti->ti_flags |= NILFS_TI_GC;
}

static void nilfs_transaction_unlock(struct nilfs_sb_info *sbi)
{
	struct nilfs_transaction_info *ti = current->journal_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	BUG_ON(ti->ti_count > 0);

	up_write(&sbi->s_nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;
	if (!list_empty(&ti->ti_garbage))
		nilfs_dispose_list(sbi, &ti->ti_garbage, 0);
}

static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
					    struct nilfs_segsum_pointer *ssp,
					    unsigned bytes)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	unsigned blocksize = sci->sc_super->s_blocksize;
	void *p;

	if (unlikely(ssp->offset + bytes > blocksize)) {
		ssp->offset = 0;
		BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
					       &segbuf->sb_segsum_buffers));
		ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
	}
	p = ssp->bh->b_data + ssp->offset;
	ssp->offset += bytes;
	return p;
}
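
/*
 * Illustrative sketch: callers reserve segment summary space through the
 * helper above, e.g.
 *
 *	struct nilfs_finfo *finfo =
 *		nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
 *					       sizeof(*finfo));
 *
 * The returned pointer addresses the reserved entry; ssp->offset advances
 * past it, hopping to the next segment summary buffer when the current
 * block has no room left.
 */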

/**
 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 * @sci: nilfs_sc_info
 */
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	struct buffer_head *sumbh;
	unsigned sumbytes;
	unsigned flags = 0;
	int err;

	if (nilfs_doing_gc())
		flags = NILFS_SS_GC;
	err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime);
	if (unlikely(err))
		return err;

	sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	sumbytes = segbuf->sb_sum.sumbytes;
	sci->sc_finfo_ptr.bh = sumbh;  sci->sc_finfo_ptr.offset = sumbytes;
	sci->sc_binfo_ptr.bh = sumbh;  sci->sc_binfo_ptr.offset = sumbytes;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
	return 0;
}

static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
	if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
		return -E2BIG; /* The current segment is filled up
				  (internal code) */
	sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
	return nilfs_segctor_reset_segment_buffer(sci);
}

static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	int err;

	if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		segbuf = sci->sc_curseg;
	}
	err = nilfs_segbuf_extend_payload(segbuf, &sci->sc_super_root);
	if (likely(!err))
		segbuf->sb_sum.flags |= NILFS_SS_SR;
	return err;
}

/*
 * Functions for making segment summary and payloads
 */
static int nilfs_segctor_segsum_block_required(
	struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
	unsigned binfo_size)
{
	unsigned blocksize = sci->sc_super->s_blocksize;
	/* The sizes of finfo and binfo are small enough compared with
	   the blocksize */

	return ssp->offset + binfo_size +
		(!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
		blocksize;
}

static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
				      struct inode *inode)
{
	sci->sc_curseg->sb_sum.nfinfo++;
	sci->sc_binfo_ptr = sci->sc_finfo_ptr;
	nilfs_segctor_map_segsum_entry(
		sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));
	/* skip finfo */
}

static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
				    struct inode *inode)
{
	struct nilfs_finfo *finfo;
	struct nilfs_inode_info *ii;
	struct nilfs_segment_buffer *segbuf;

	if (sci->sc_blk_cnt == 0)
		return;

	ii = NILFS_I(inode);
	finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
					       sizeof(*finfo));
	finfo->fi_ino = cpu_to_le64(inode->i_ino);
	finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
	finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
	finfo->fi_cno = cpu_to_le64(ii->i_cno);

	segbuf = sci->sc_curseg;
	segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
		sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
	sci->sc_finfo_ptr = sci->sc_binfo_ptr;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
}

static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
					struct buffer_head *bh,
					struct inode *inode,
					unsigned binfo_size)
{
	struct nilfs_segment_buffer *segbuf;
	int required, err = 0;

 retry:
	segbuf = sci->sc_curseg;
	required = nilfs_segctor_segsum_block_required(
		sci, &sci->sc_binfo_ptr, binfo_size);
	if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
		nilfs_segctor_end_finfo(sci, inode);
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		goto retry;
	}
	if (unlikely(required)) {
		err = nilfs_segbuf_extend_segsum(segbuf);
		if (unlikely(err))
			goto failed;
	}
	if (sci->sc_blk_cnt == 0)
		nilfs_segctor_begin_finfo(sci, inode);

	nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
	/* Substitution to vblocknr is delayed until update_blocknr() */
	nilfs_segbuf_add_file_buffer(segbuf, bh);
	sci->sc_blk_cnt++;
 failed:
	return err;
}

static int nilfs_handle_bmap_error(int err, const char *fname,
				   struct inode *inode, struct super_block *sb)
{
	if (err == -EINVAL) {
		nilfs_error(sb, fname, "broken bmap (inode=%lu)\n",
			    inode->i_ino);
		err = -EIO;
	}
	return err;
}

/*
 * Callback functions that enumerate, mark, and collect dirty blocks
 */
static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
				   struct buffer_head *bh, struct inode *inode)
{
	int err;

	/* BUG_ON(!buffer_dirty(bh)); */
	/* excluded by scan_dirty_data_buffers() */
	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (unlikely(err < 0))
		return nilfs_handle_bmap_error(err, __func__, inode,
					       sci->sc_super);

	err = nilfs_segctor_add_file_block(sci, bh, inode,
					   sizeof(struct nilfs_binfo_v));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	int err;

	/* BUG_ON(!buffer_dirty(bh)); */
	/* excluded by scan_dirty_node_buffers() */
	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (unlikely(err < 0))
		return nilfs_handle_bmap_error(err, __func__, inode,
					       sci->sc_super);
	return 0;
}

static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	BUG_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
}

static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*binfo_v));
	*binfo_v = binfo->bi_v;
}

static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	__le64 *vblocknr = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*vblocknr));
	*vblocknr = binfo->bi_v.bi_vblocknr;
}

struct nilfs_sc_operations nilfs_sc_file_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_file_bmap,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = nilfs_write_file_node_binfo,
};

static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (unlikely(err < 0))
		return nilfs_handle_bmap_error(err, __func__, inode,
					       sci->sc_super);

	err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	BUG_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode,
					    sizeof(struct nilfs_binfo_dat));
}

static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	__le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
							sizeof(*blkoff));
	*blkoff = binfo->bi_dat.bi_blkoff;
}

static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	struct nilfs_binfo_dat *binfo_dat =
		nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
	*binfo_dat = binfo->bi_dat;
}

struct nilfs_sc_operations nilfs_sc_dat_ops = {
	.collect_data = nilfs_collect_dat_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_dat_bmap,
	.write_data_binfo = nilfs_write_dat_data_binfo,
	.write_node_binfo = nilfs_write_dat_node_binfo,
};

struct nilfs_sc_operations nilfs_sc_dsync_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = NULL,
	.collect_bmap = NULL,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = NULL,
};

static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
					      struct list_head *listp,
					      size_t nlimit,
					      loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t index = 0, last = ULONG_MAX;
	size_t ndirties = 0;
	int i;

	if (unlikely(start != 0 || end != LLONG_MAX)) {
		/*
		 * A valid range is given for sync-ing data pages. The
		 * range is rounded to per-page; extra dirty buffers
		 * may be included if blocksize < pagesize.
		 */
		index = start >> PAGE_SHIFT;
		last = end >> PAGE_SHIFT;
	}
	pagevec_init(&pvec, 0);
 repeat:
	if (unlikely(index > last) ||
	    !pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				min_t(pgoff_t, last - index,
				      PAGEVEC_SIZE - 1) + 1))
		return ndirties;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct buffer_head *bh, *head;
		struct page *page = pvec.pages[i];

		if (unlikely(page->index > last))
			break;

		if (mapping->host) {
			lock_page(page);
			if (!page_has_buffers(page))
				create_empty_buffers(page,
						     1 << inode->i_blkbits, 0);
			unlock_page(page);
		}

		bh = head = page_buffers(page);
		do {
			if (!buffer_dirty(bh))
				continue;
			get_bh(bh);
			list_add_tail(&bh->b_assoc_buffers, listp);
			ndirties++;
			if (unlikely(ndirties >= nlimit)) {
				pagevec_release(&pvec);
				cond_resched();
				return ndirties;
			}
		} while (bh = bh->b_this_page, bh != head);
	}
	pagevec_release(&pvec);
	cond_resched();
	goto repeat;
}

static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
					    struct list_head *listp)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct address_space *mapping = &ii->i_btnode_cache;
	struct pagevec pvec;
	struct buffer_head *bh, *head;
	unsigned int i;
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);

	while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				  PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			bh = head = page_buffers(pvec.pages[i]);
			do {
				if (buffer_dirty(bh)) {
					get_bh(bh);
					list_add_tail(&bh->b_assoc_buffers,
						      listp);
				}
				bh = bh->b_this_page;
			} while (bh != head);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

static void nilfs_dispose_list(struct nilfs_sb_info *sbi,
			       struct list_head *head, int force)
{
	struct nilfs_inode_info *ii, *n;
	struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
	unsigned nv = 0;

	while (!list_empty(head)) {
		spin_lock(&sbi->s_inode_lock);
		list_for_each_entry_safe(ii, n, head, i_dirty) {
			list_del_init(&ii->i_dirty);
			if (force) {
				if (unlikely(ii->i_bh)) {
					brelse(ii->i_bh);
					ii->i_bh = NULL;
				}
			} else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
				set_bit(NILFS_I_QUEUED, &ii->i_state);
				list_add_tail(&ii->i_dirty,
					      &sbi->s_dirty_files);
				continue;
			}
			ivec[nv++] = ii;
			if (nv == SC_N_INODEVEC)
				break;
		}
		spin_unlock(&sbi->s_inode_lock);

		for (pii = ivec; nv > 0; pii++, nv--)
			iput(&(*pii)->vfs_inode);
	}
}

static int nilfs_test_metadata_dirty(struct nilfs_sb_info *sbi)
{
	struct the_nilfs *nilfs = sbi->s_nilfs;
	int ret = 0;

	if (nilfs_mdt_fetch_dirty(sbi->s_ifile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
		ret++;
	if (ret || nilfs_doing_gc())
		if (nilfs_mdt_fetch_dirty(nilfs_dat_inode(nilfs)))
			ret++;
	return ret;
}

static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
{
	return list_empty(&sci->sc_dirty_files) &&
		!test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
		list_empty(&sci->sc_cleaning_segments) &&
		(!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
}

static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	int ret = 0;

	if (nilfs_test_metadata_dirty(sbi))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	spin_lock(&sbi->s_inode_lock);
	if (list_empty(&sbi->s_dirty_files) && nilfs_segctor_clean(sci))
		ret++;

	spin_unlock(&sbi->s_inode_lock);
	return ret;
}

static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;

	nilfs_mdt_clear_dirty(sbi->s_ifile);
	nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
	nilfs_mdt_clear_dirty(nilfs->ns_sufile);
	nilfs_mdt_clear_dirty(nilfs_dat_inode(nilfs));
}
static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	/* XXX: this interface will be changed */
	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
					  &raw_cp, &bh_cp);
	if (likely(!err)) {
		/* This code is duplicated in cpfile; however, it is
		   needed to collect the checkpoint even if it was not
		   newly created */
		nilfs_mdt_mark_buffer_dirty(bh_cp);
		nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
		nilfs_cpfile_put_checkpoint(
			nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	} else {
		BUG_ON(err == -EINVAL || err == -ENOENT);
	}
	return err;
}
870
871static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
872{
873 struct nilfs_sb_info *sbi = sci->sc_sbi;
874 struct the_nilfs *nilfs = sbi->s_nilfs;
875 struct buffer_head *bh_cp;
876 struct nilfs_checkpoint *raw_cp;
877 int err;
878
879 err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
880 &raw_cp, &bh_cp);
881 if (unlikely(err)) {
882 BUG_ON(err == -EINVAL || err == -ENOENT);
883 goto failed_ibh;
884 }
885 raw_cp->cp_snapshot_list.ssl_next = 0;
886 raw_cp->cp_snapshot_list.ssl_prev = 0;
887 raw_cp->cp_inodes_count =
888 cpu_to_le64(atomic_read(&sbi->s_inodes_count));
889 raw_cp->cp_blocks_count =
890 cpu_to_le64(atomic_read(&sbi->s_blocks_count));
891 raw_cp->cp_nblk_inc =
892 cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
893 raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
894 raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);
895 if (sci->sc_sketch_inode && i_size_read(sci->sc_sketch_inode) > 0)
896 nilfs_checkpoint_set_sketch(raw_cp);
897 nilfs_write_inode_common(sbi->s_ifile, &raw_cp->cp_ifile_inode, 1);
898 nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
899 return 0;
900
901 failed_ibh:
902 return err;
903}
904
905static void nilfs_fill_in_file_bmap(struct inode *ifile,
906 struct nilfs_inode_info *ii)
907
908{
909 struct buffer_head *ibh;
910 struct nilfs_inode *raw_inode;
911
912 if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
913 ibh = ii->i_bh;
914 BUG_ON(!ibh);
915 raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
916 ibh);
917 nilfs_bmap_write(ii->i_bmap, raw_inode);
918 nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
919 }
920}
921
922static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci,
923 struct inode *ifile)
924{
925 struct nilfs_inode_info *ii;
926
927 list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
928 nilfs_fill_in_file_bmap(ifile, ii);
929 set_bit(NILFS_I_COLLECTED, &ii->i_state);
930 }
931 if (sci->sc_sketch_inode) {
932 ii = NILFS_I(sci->sc_sketch_inode);
933 if (test_bit(NILFS_I_DIRTY, &ii->i_state))
934 nilfs_fill_in_file_bmap(ifile, ii);
935 }
936}
937
938/*
939 * CRC calculation routines
940 */
941static void nilfs_fill_in_super_root_crc(struct buffer_head *bh_sr, u32 seed)
942{
943 struct nilfs_super_root *raw_sr =
944 (struct nilfs_super_root *)bh_sr->b_data;
945 u32 crc;
946
947 BUG_ON(NILFS_SR_BYTES > bh_sr->b_size);
948 crc = crc32_le(seed,
949 (unsigned char *)raw_sr + sizeof(raw_sr->sr_sum),
950 NILFS_SR_BYTES - sizeof(raw_sr->sr_sum));
951 raw_sr->sr_sum = cpu_to_le32(crc);
952}
953
954static void nilfs_segctor_fill_in_checksums(struct nilfs_sc_info *sci,
955 u32 seed)
956{
957 struct nilfs_segment_buffer *segbuf;
958
959 if (sci->sc_super_root)
960 nilfs_fill_in_super_root_crc(sci->sc_super_root, seed);
961
962 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
963 nilfs_segbuf_fill_in_segsum_crc(segbuf, seed);
964 nilfs_segbuf_fill_in_data_crc(segbuf, seed);
965 }
966}
967
968static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
969 struct the_nilfs *nilfs)
970{
971 struct buffer_head *bh_sr = sci->sc_super_root;
972 struct nilfs_super_root *raw_sr =
973 (struct nilfs_super_root *)bh_sr->b_data;
974 unsigned isz = nilfs->ns_inode_size;
975
976 raw_sr->sr_bytes = cpu_to_le16(NILFS_SR_BYTES);
977 raw_sr->sr_nongc_ctime
978 = cpu_to_le64(nilfs_doing_gc() ?
979 nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
980 raw_sr->sr_flags = 0;
981
982 nilfs_mdt_write_inode_direct(
983 nilfs_dat_inode(nilfs), bh_sr, NILFS_SR_DAT_OFFSET(isz));
984 nilfs_mdt_write_inode_direct(
985 nilfs->ns_cpfile, bh_sr, NILFS_SR_CPFILE_OFFSET(isz));
986 nilfs_mdt_write_inode_direct(
987 nilfs->ns_sufile, bh_sr, NILFS_SR_SUFILE_OFFSET(isz));
988}
989
990static void nilfs_redirty_inodes(struct list_head *head)
991{
992 struct nilfs_inode_info *ii;
993
994 list_for_each_entry(ii, head, i_dirty) {
995 if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
996 clear_bit(NILFS_I_COLLECTED, &ii->i_state);
997 }
998}
999
1000static void nilfs_drop_collected_inodes(struct list_head *head)
1001{
1002 struct nilfs_inode_info *ii;
1003
1004 list_for_each_entry(ii, head, i_dirty) {
1005 if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
1006 continue;
1007
1008 clear_bit(NILFS_I_INODE_DIRTY, &ii->i_state);
1009 set_bit(NILFS_I_UPDATED, &ii->i_state);
1010 }
1011}
1012
1013static void nilfs_segctor_cancel_free_segments(struct nilfs_sc_info *sci,
1014 struct inode *sufile)
1015
1016{
1017 struct list_head *head = &sci->sc_cleaning_segments;
1018 struct nilfs_segment_entry *ent;
1019 int err;
1020
1021 list_for_each_entry(ent, head, list) {
1022 if (!(ent->flags & NILFS_SLH_FREED))
1023 break;
1024 err = nilfs_sufile_cancel_free(sufile, ent->segnum);
1025 BUG_ON(err);
1026
1027 ent->flags &= ~NILFS_SLH_FREED;
1028 }
1029}
1030
1031static int nilfs_segctor_prepare_free_segments(struct nilfs_sc_info *sci,
1032 struct inode *sufile)
1033{
1034 struct list_head *head = &sci->sc_cleaning_segments;
1035 struct nilfs_segment_entry *ent;
1036 int err;
1037
1038 list_for_each_entry(ent, head, list) {
1039 err = nilfs_sufile_free(sufile, ent->segnum);
1040 if (unlikely(err))
1041 return err;
1042 ent->flags |= NILFS_SLH_FREED;
1043 }
1044 return 0;
1045}
1046
1047static void nilfs_segctor_commit_free_segments(struct nilfs_sc_info *sci)
1048{
1049 nilfs_dispose_segment_list(&sci->sc_cleaning_segments);
1050}
1051
1052static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
1053 struct inode *inode,
1054 struct list_head *listp,
1055 int (*collect)(struct nilfs_sc_info *,
1056 struct buffer_head *,
1057 struct inode *))
1058{
1059 struct buffer_head *bh, *n;
1060 int err = 0;
1061
1062 if (collect) {
1063 list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
1064 list_del_init(&bh->b_assoc_buffers);
1065 err = collect(sci, bh, inode);
1066 brelse(bh);
1067 if (unlikely(err))
1068 goto dispose_buffers;
1069 }
1070 return 0;
1071 }
1072
1073 dispose_buffers:
1074 while (!list_empty(listp)) {
1075 bh = list_entry(listp->next, struct buffer_head,
1076 b_assoc_buffers);
1077 list_del_init(&bh->b_assoc_buffers);
1078 brelse(bh);
1079 }
1080 return err;
1081}
1082
Ryusuke Konishif30bf3e2009-04-06 19:01:38 -07001083static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
1084{
1085 /* Remaining number of blocks within segment buffer */
1086 return sci->sc_segbuf_nblocks -
1087 (sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
1088}
1089
Ryusuke Konishi9ff051232009-04-06 19:01:37 -07001090static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
1091 struct inode *inode,
1092 struct nilfs_sc_operations *sc_ops)
1093{
1094 LIST_HEAD(data_buffers);
1095 LIST_HEAD(node_buffers);
Ryusuke Konishif30bf3e2009-04-06 19:01:38 -07001096 int err;
Ryusuke Konishi9ff051232009-04-06 19:01:37 -07001097
1098 if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
Ryusuke Konishif30bf3e2009-04-06 19:01:38 -07001099 size_t n, rest = nilfs_segctor_buffer_rest(sci);
1100
1101 n = nilfs_lookup_dirty_data_buffers(
1102 inode, &data_buffers, rest + 1, 0, LLONG_MAX);
1103 if (n > rest) {
1104 err = nilfs_segctor_apply_buffers(
Ryusuke Konishi9ff051232009-04-06 19:01:37 -07001105 sci, inode, &data_buffers,
Ryusuke Konishif30bf3e2009-04-06 19:01:38 -07001106 sc_ops->collect_data);
1107 BUG_ON(!err); /* always receive -E2BIG or true error */
Ryusuke Konishi9ff051232009-04-06 19:01:37 -07001108 goto break_or_fail;
1109 }
1110 }
1111 nilfs_lookup_dirty_node_buffers(inode, &node_buffers);
1112
1113 if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
1114 err = nilfs_segctor_apply_buffers(
1115 sci, inode, &data_buffers, sc_ops->collect_data);
1116 if (unlikely(err)) {
1117 /* dispose node list */
1118 nilfs_segctor_apply_buffers(
1119 sci, inode, &node_buffers, NULL);
1120 goto break_or_fail;
1121 }
1122 sci->sc_stage.flags |= NILFS_CF_NODE;
1123 }
1124 /* Collect node */
1125 err = nilfs_segctor_apply_buffers(
1126 sci, inode, &node_buffers, sc_ops->collect_node);
1127 if (unlikely(err))
1128 goto break_or_fail;
1129
1130 nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
1131 err = nilfs_segctor_apply_buffers(
1132 sci, inode, &node_buffers, sc_ops->collect_bmap);
1133 if (unlikely(err))
1134 goto break_or_fail;
1135
1136 nilfs_segctor_end_finfo(sci, inode);
1137 sci->sc_stage.flags &= ~NILFS_CF_NODE;
1138
1139 break_or_fail:
1140 return err;
1141}
1142
1143static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
1144 struct inode *inode)
1145{
1146 LIST_HEAD(data_buffers);
Ryusuke Konishif30bf3e2009-04-06 19:01:38 -07001147 size_t n, rest = nilfs_segctor_buffer_rest(sci);
1148 int err;
Ryusuke Konishi9ff051232009-04-06 19:01:37 -07001149
Ryusuke Konishif30bf3e2009-04-06 19:01:38 -07001150 n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
1151 sci->sc_dsync_start,
1152 sci->sc_dsync_end);
1153
1154 err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
1155 nilfs_collect_file_data);
1156 if (!err) {
Ryusuke Konishi9ff051232009-04-06 19:01:37 -07001157 nilfs_segctor_end_finfo(sci, inode);
Ryusuke Konishif30bf3e2009-04-06 19:01:38 -07001158 BUG_ON(n > rest);
1159 /* always receive -E2BIG or true error if n > rest */
1160 }
Ryusuke Konishi9ff051232009-04-06 19:01:37 -07001161 return err;
1162}
1163
1164static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
1165{
1166 struct nilfs_sb_info *sbi = sci->sc_sbi;
1167 struct the_nilfs *nilfs = sbi->s_nilfs;
1168 struct list_head *head;
1169 struct nilfs_inode_info *ii;
1170 int err = 0;
1171
1172 switch (sci->sc_stage.scnt) {
1173 case NILFS_ST_INIT:
1174 /* Pre-processes */
1175 sci->sc_stage.flags = 0;
1176
1177 if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
1178 sci->sc_nblk_inc = 0;
1179 sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
1180 if (mode == SC_LSEG_DSYNC) {
1181 sci->sc_stage.scnt = NILFS_ST_DSYNC;
1182 goto dsync_mode;
1183 }
1184 }
1185
1186 sci->sc_stage.dirty_file_ptr = NULL;
1187 sci->sc_stage.gc_inode_ptr = NULL;
1188 if (mode == SC_FLUSH_DAT) {
1189 sci->sc_stage.scnt = NILFS_ST_DAT;
1190 goto dat_stage;
1191 }
1192 sci->sc_stage.scnt++; /* Fall through */
1193 case NILFS_ST_GC:
1194 if (nilfs_doing_gc()) {
1195 head = &sci->sc_gc_inodes;
1196 ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
1197 head, i_dirty);
1198 list_for_each_entry_continue(ii, head, i_dirty) {
1199 err = nilfs_segctor_scan_file(
1200 sci, &ii->vfs_inode,
1201 &nilfs_sc_file_ops);
1202 if (unlikely(err)) {
1203 sci->sc_stage.gc_inode_ptr = list_entry(
1204 ii->i_dirty.prev,
1205 struct nilfs_inode_info,
1206 i_dirty);
1207 goto break_or_fail;
1208 }
1209 set_bit(NILFS_I_COLLECTED, &ii->i_state);
1210 }
1211 sci->sc_stage.gc_inode_ptr = NULL;
1212 }
1213 sci->sc_stage.scnt++; /* Fall through */
1214 case NILFS_ST_FILE:
1215 head = &sci->sc_dirty_files;
1216 ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
1217 i_dirty);
1218 list_for_each_entry_continue(ii, head, i_dirty) {
1219 clear_bit(NILFS_I_DIRTY, &ii->i_state);
1220
1221 err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
1222 &nilfs_sc_file_ops);
1223 if (unlikely(err)) {
1224 sci->sc_stage.dirty_file_ptr =
1225 list_entry(ii->i_dirty.prev,
1226 struct nilfs_inode_info,
1227 i_dirty);
1228 goto break_or_fail;
1229 }
1230 /* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
1231 /* XXX: required ? */
1232 }
1233 sci->sc_stage.dirty_file_ptr = NULL;
1234 if (mode == SC_FLUSH_FILE) {
1235 sci->sc_stage.scnt = NILFS_ST_DONE;
1236 return 0;
1237 }
1238 sci->sc_stage.scnt++; /* Fall through */
1239 case NILFS_ST_SKETCH:
1240 if (mode == SC_LSEG_SR && sci->sc_sketch_inode) {
1241 ii = NILFS_I(sci->sc_sketch_inode);
1242 if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
1243 sci->sc_sketch_inode->i_ctime.tv_sec
1244 = sci->sc_seg_ctime;
1245 sci->sc_sketch_inode->i_mtime.tv_sec
1246 = sci->sc_seg_ctime;
1247 err = nilfs_mark_inode_dirty(
1248 sci->sc_sketch_inode);
1249 if (unlikely(err))
1250 goto break_or_fail;
1251 }
1252 err = nilfs_segctor_scan_file(sci,
1253 sci->sc_sketch_inode,
1254 &nilfs_sc_file_ops);
1255 if (unlikely(err))
1256 goto break_or_fail;
1257 }
1258 sci->sc_stage.scnt++;
1259 sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
1260 /* Fall through */
1261 case NILFS_ST_IFILE:
1262 err = nilfs_segctor_scan_file(sci, sbi->s_ifile,
1263 &nilfs_sc_file_ops);
1264 if (unlikely(err))
1265 break;
1266 sci->sc_stage.scnt++;
1267 /* Creating a checkpoint */
1268 err = nilfs_segctor_create_checkpoint(sci);
1269 if (unlikely(err))
1270 break;
1271 /* Fall through */
1272 case NILFS_ST_CPFILE:
1273 err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
1274 &nilfs_sc_file_ops);
1275 if (unlikely(err))
1276 break;
1277 sci->sc_stage.scnt++; /* Fall through */
1278 case NILFS_ST_SUFILE:
1279 err = nilfs_segctor_prepare_free_segments(sci,
1280 nilfs->ns_sufile);
1281 if (unlikely(err))
1282 break;
1283 err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
1284 &nilfs_sc_file_ops);
1285 if (unlikely(err))
1286 break;
1287 sci->sc_stage.scnt++; /* Fall through */
1288 case NILFS_ST_DAT:
1289 dat_stage:
1290 err = nilfs_segctor_scan_file(sci, nilfs_dat_inode(nilfs),
1291 &nilfs_sc_dat_ops);
1292 if (unlikely(err))
1293 break;
1294 if (mode == SC_FLUSH_DAT) {
1295 sci->sc_stage.scnt = NILFS_ST_DONE;
1296 return 0;
1297 }
1298 sci->sc_stage.scnt++; /* Fall through */
1299 case NILFS_ST_SR:
1300 if (mode == SC_LSEG_SR) {
1301 /* Appending a super root */
1302 err = nilfs_segctor_add_super_root(sci);
1303 if (unlikely(err))
1304 break;
1305 }
1306 /* End of a logical segment */
1307 sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
1308 sci->sc_stage.scnt = NILFS_ST_DONE;
1309 return 0;
1310 case NILFS_ST_DSYNC:
1311 dsync_mode:
1312 sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
Ryusuke Konishif30bf3e2009-04-06 19:01:38 -07001313 ii = sci->sc_dsync_inode;
Ryusuke Konishi9ff051232009-04-06 19:01:37 -07001314 if (!test_bit(NILFS_I_BUSY, &ii->i_state))
1315 break;
1316
1317 err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
1318 if (unlikely(err))
1319 break;
Ryusuke Konishi9ff051232009-04-06 19:01:37 -07001320 sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
1321 sci->sc_stage.scnt = NILFS_ST_DONE;
1322 return 0;
1323 case NILFS_ST_DONE:
1324 return 0;
1325 default:
1326 BUG();
1327 }
1328
1329 break_or_fail:
1330 return err;
1331}
1332
1333static int nilfs_segctor_terminate_segment(struct nilfs_sc_info *sci,
1334 struct nilfs_segment_buffer *segbuf,
1335 struct inode *sufile)
1336{
1337 struct nilfs_segment_entry *ent = segbuf->sb_segent;
1338 int err;
1339
1340 err = nilfs_open_segment_entry(ent, sufile);
1341 if (unlikely(err))
1342 return err;
1343 nilfs_mdt_mark_buffer_dirty(ent->bh_su);
1344 nilfs_mdt_mark_dirty(sufile);
1345 nilfs_close_segment_entry(ent, sufile);
1346
1347 list_add_tail(&ent->list, &sci->sc_active_segments);
1348 segbuf->sb_segent = NULL;
1349 return 0;
1350}
1351
1352static int nilfs_touch_segusage(struct inode *sufile, __u64 segnum)
1353{
1354 struct buffer_head *bh_su;
1355 struct nilfs_segment_usage *raw_su;
1356 int err;
1357
1358 err = nilfs_sufile_get_segment_usage(sufile, segnum, &raw_su, &bh_su);
1359 if (unlikely(err))
1360 return err;
1361 nilfs_mdt_mark_buffer_dirty(bh_su);
1362 nilfs_mdt_mark_dirty(sufile);
1363 nilfs_sufile_put_segment_usage(sufile, segnum, bh_su);
1364 return 0;
1365}
1366
1367static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
1368 struct the_nilfs *nilfs)
1369{
1370 struct nilfs_segment_buffer *segbuf, *n;
1371 struct inode *sufile = nilfs->ns_sufile;
1372 __u64 nextnum;
1373 int err;
1374
1375 if (list_empty(&sci->sc_segbufs)) {
1376 segbuf = nilfs_segbuf_new(sci->sc_super);
1377 if (unlikely(!segbuf))
1378 return -ENOMEM;
1379 list_add(&segbuf->sb_list, &sci->sc_segbufs);
1380 } else
1381 segbuf = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
1382
1383 err = nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
1384 nilfs->ns_pseg_offset, nilfs);
1385 if (unlikely(err))
1386 return err;
1387
1388 if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
1389 err = nilfs_segctor_terminate_segment(sci, segbuf, sufile);
1390 if (unlikely(err))
1391 return err;
1392
1393 nilfs_shift_to_next_segment(nilfs);
1394 err = nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
1395 }
1396 sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
1397
1398 err = nilfs_touch_segusage(sufile, segbuf->sb_segnum);
1399 if (unlikely(err))
1400 return err;
1401
1402 if (nilfs->ns_segnum == nilfs->ns_nextnum) {
1403 /* Start from the head of a new full segment */
1404 err = nilfs_sufile_alloc(sufile, &nextnum);
1405 if (unlikely(err))
1406 return err;
1407 } else
1408 nextnum = nilfs->ns_nextnum;
1409
1410 segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
1411 nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);
1412
1413 /* truncating segment buffers */
1414 list_for_each_entry_safe_continue(segbuf, n, &sci->sc_segbufs,
1415 sb_list) {
1416 list_del_init(&segbuf->sb_list);
1417 nilfs_segbuf_free(segbuf);
1418 }
1419 return err;
1420}
1421
1422static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
1423 struct the_nilfs *nilfs, int nadd)
1424{
1425 struct nilfs_segment_buffer *segbuf, *prev, *n;
1426 struct inode *sufile = nilfs->ns_sufile;
1427 __u64 nextnextnum;
1428 LIST_HEAD(list);
1429 int err, ret, i;
1430
1431 prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
1432 /*
1433 * Since the segment specified with nextnum might be allocated during
1434 * the previous construction, the buffer including its segusage may
1435 * not be dirty. The following call ensures that the buffer is dirty
1436 * and will pin the buffer on memory until the sufile is written.
1437 */
1438 err = nilfs_touch_segusage(sufile, prev->sb_nextnum);
1439 if (unlikely(err))
1440 return err;
1441
1442 for (i = 0; i < nadd; i++) {
1443 /* extend segment info */
1444 err = -ENOMEM;
1445 segbuf = nilfs_segbuf_new(sci->sc_super);
1446 if (unlikely(!segbuf))
1447 goto failed;
1448
1449 /* map this buffer to region of segment on-disk */
1450 err = nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
1451 if (unlikely(err))
1452 goto failed_segbuf;
1453
1454 sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;
1455
1456 /* allocate the next next full segment */
1457 err = nilfs_sufile_alloc(sufile, &nextnextnum);
1458 if (unlikely(err))
1459 goto failed_segbuf;
1460
1461 segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
1462 nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);
1463
1464 list_add_tail(&segbuf->sb_list, &list);
1465 prev = segbuf;
1466 }
1467 list_splice(&list, sci->sc_segbufs.prev);
1468 return 0;
1469
1470 failed_segbuf:
1471 nilfs_segbuf_free(segbuf);
1472 failed:
1473 list_for_each_entry_safe(segbuf, n, &list, sb_list) {
1474 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1475 BUG_ON(ret);
1476 list_del_init(&segbuf->sb_list);
1477 nilfs_segbuf_free(segbuf);
1478 }
1479 return err;
1480}
1481
1482static void nilfs_segctor_free_incomplete_segments(struct nilfs_sc_info *sci,
1483 struct the_nilfs *nilfs)
1484{
1485 struct nilfs_segment_buffer *segbuf;
1486 int ret, done = 0;
1487
1488 segbuf = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
1489 if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
1490 ret = nilfs_sufile_free(nilfs->ns_sufile, segbuf->sb_nextnum);
1491 BUG_ON(ret);
1492 }
1493 if (segbuf->sb_io_error) {
1494 /* Case 1: The first segment failed */
1495 if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
1496 /* Case 1a: Partial segment appended into an existing
1497 segment */
1498 nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
1499 segbuf->sb_fseg_end);
1500 else /* Case 1b: New full segment */
1501 set_nilfs_discontinued(nilfs);
1502 done++;
1503 }
1504
1505 list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
1506 ret = nilfs_sufile_free(nilfs->ns_sufile, segbuf->sb_nextnum);
1507 BUG_ON(ret);
1508 if (!done && segbuf->sb_io_error) {
1509 if (segbuf->sb_segnum != nilfs->ns_nextnum)
1510 /* Case 2: extended segment (!= next) failed */
1511 nilfs_sufile_set_error(nilfs->ns_sufile,
1512 segbuf->sb_segnum);
1513 done++;
1514 }
1515 }
1516}
1517
1518static void nilfs_segctor_clear_segment_buffers(struct nilfs_sc_info *sci)
1519{
1520 struct nilfs_segment_buffer *segbuf;
1521
1522 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list)
1523 nilfs_segbuf_clear(segbuf);
1524 sci->sc_super_root = NULL;
1525}
1526
1527static void nilfs_segctor_destroy_segment_buffers(struct nilfs_sc_info *sci)
1528{
1529 struct nilfs_segment_buffer *segbuf;
1530
1531 while (!list_empty(&sci->sc_segbufs)) {
1532 segbuf = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
1533 list_del_init(&segbuf->sb_list);
1534 nilfs_segbuf_free(segbuf);
1535 }
1536 /* sci->sc_curseg = NULL; */
1537}
1538
1539static void nilfs_segctor_end_construction(struct nilfs_sc_info *sci,
1540 struct the_nilfs *nilfs, int err)
1541{
1542 if (unlikely(err)) {
1543 nilfs_segctor_free_incomplete_segments(sci, nilfs);
1544 nilfs_segctor_cancel_free_segments(sci, nilfs->ns_sufile);
1545 }
1546 nilfs_segctor_clear_segment_buffers(sci);
1547}
1548
1549static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
1550 struct inode *sufile)
1551{
1552 struct nilfs_segment_buffer *segbuf;
1553 struct buffer_head *bh_su;
1554 struct nilfs_segment_usage *raw_su;
1555 unsigned long live_blocks;
1556 int ret;
1557
1558 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1559 ret = nilfs_sufile_get_segment_usage(sufile, segbuf->sb_segnum,
1560 &raw_su, &bh_su);
1561 BUG_ON(ret); /* always succeed because bh_su is dirty */
1562 live_blocks = segbuf->sb_sum.nblocks +
1563 (segbuf->sb_pseg_start - segbuf->sb_fseg_start);
1564 raw_su->su_lastmod = cpu_to_le64(sci->sc_seg_ctime);
1565 raw_su->su_nblocks = cpu_to_le32(live_blocks);
1566 nilfs_sufile_put_segment_usage(sufile, segbuf->sb_segnum,
1567 bh_su);
1568 }
1569}
1570
1571static void nilfs_segctor_cancel_segusage(struct nilfs_sc_info *sci,
1572 struct inode *sufile)
1573{
1574 struct nilfs_segment_buffer *segbuf;
1575 struct buffer_head *bh_su;
1576 struct nilfs_segment_usage *raw_su;
1577 int ret;
1578
1579 segbuf = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
1580 ret = nilfs_sufile_get_segment_usage(sufile, segbuf->sb_segnum,
1581 &raw_su, &bh_su);
1582 BUG_ON(ret); /* always succeed because bh_su is dirty */
1583 raw_su->su_nblocks = cpu_to_le32(segbuf->sb_pseg_start -
1584 segbuf->sb_fseg_start);
1585 nilfs_sufile_put_segment_usage(sufile, segbuf->sb_segnum, bh_su);
1586
1587 list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
1588 ret = nilfs_sufile_get_segment_usage(sufile, segbuf->sb_segnum,
1589 &raw_su, &bh_su);
1590 BUG_ON(ret); /* always succeed */
1591 raw_su->su_nblocks = 0;
1592 nilfs_sufile_put_segment_usage(sufile, segbuf->sb_segnum,
1593 bh_su);
1594 }
1595}
1596
1597static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
1598 struct nilfs_segment_buffer *last,
1599 struct inode *sufile)
1600{
1601 struct nilfs_segment_buffer *segbuf = last, *n;
1602 int ret;
1603
1604 list_for_each_entry_safe_continue(segbuf, n, &sci->sc_segbufs,
1605 sb_list) {
1606 list_del_init(&segbuf->sb_list);
1607 sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
1608 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1609 BUG_ON(ret);
1610 nilfs_segbuf_free(segbuf);
1611 }
1612}
1613
1614
1615static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
1616 struct the_nilfs *nilfs, int mode)
1617{
1618 struct nilfs_cstage prev_stage = sci->sc_stage;
1619 int err, nadd = 1;
1620
1621 /* Collection retry loop */
1622 for (;;) {
1623 sci->sc_super_root = NULL;
1624 sci->sc_nblk_this_inc = 0;
1625 sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
1626
1627 err = nilfs_segctor_reset_segment_buffer(sci);
1628 if (unlikely(err))
1629 goto failed;
1630
1631 err = nilfs_segctor_collect_blocks(sci, mode);
1632 sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
1633 if (!err)
1634 break;
1635
1636 if (unlikely(err != -E2BIG))
1637 goto failed;
1638
1639 /* The current segment is filled up */
1640 if (mode != SC_LSEG_SR || sci->sc_stage.scnt < NILFS_ST_CPFILE)
1641 break;
1642
1643 nilfs_segctor_cancel_free_segments(sci, nilfs->ns_sufile);
1644 nilfs_segctor_clear_segment_buffers(sci);
1645
1646 err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
1647 if (unlikely(err))
1648 return err;
1649
1650 nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
1651 sci->sc_stage = prev_stage;
1652 }
1653 nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
1654 return 0;
1655
1656 failed:
1657 return err;
1658}
1659
1660static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
1661 struct buffer_head *new_bh)
1662{
1663 BUG_ON(!list_empty(&new_bh->b_assoc_buffers));
1664
1665 list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
1666 /* The caller must release old_bh */
1667}
1668
1669static int
1670nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
1671 struct nilfs_segment_buffer *segbuf,
1672 int mode)
1673{
1674 struct inode *inode = NULL;
1675 sector_t blocknr;
1676 unsigned long nfinfo = segbuf->sb_sum.nfinfo;
1677 unsigned long nblocks = 0, ndatablk = 0;
1678 struct nilfs_sc_operations *sc_op = NULL;
1679 struct nilfs_segsum_pointer ssp;
1680 struct nilfs_finfo *finfo = NULL;
1681 union nilfs_binfo binfo;
1682 struct buffer_head *bh, *bh_org;
1683 ino_t ino = 0;
1684 int err = 0;
1685
1686 if (!nfinfo)
1687 goto out;
1688
1689 blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
1690 ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
1691 ssp.offset = sizeof(struct nilfs_segment_summary);
1692
1693 list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
1694 if (bh == sci->sc_super_root)
1695 break;
1696 if (!finfo) {
1697 finfo = nilfs_segctor_map_segsum_entry(
1698 sci, &ssp, sizeof(*finfo));
1699 ino = le64_to_cpu(finfo->fi_ino);
1700 nblocks = le32_to_cpu(finfo->fi_nblocks);
1701 ndatablk = le32_to_cpu(finfo->fi_ndatablk);
1702
1703 if (buffer_nilfs_node(bh))
1704 inode = NILFS_BTNC_I(bh->b_page->mapping);
1705 else
1706 inode = NILFS_AS_I(bh->b_page->mapping);
1707
1708 if (mode == SC_LSEG_DSYNC)
1709 sc_op = &nilfs_sc_dsync_ops;
1710 else if (ino == NILFS_DAT_INO)
1711 sc_op = &nilfs_sc_dat_ops;
1712 else /* file blocks */
1713 sc_op = &nilfs_sc_file_ops;
1714 }
1715 bh_org = bh;
1716 get_bh(bh_org);
1717 err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
1718 &binfo);
1719 if (bh != bh_org)
1720 nilfs_list_replace_buffer(bh_org, bh);
1721 brelse(bh_org);
1722 if (unlikely(err))
1723 goto failed_bmap;
1724
1725 if (ndatablk > 0)
1726 sc_op->write_data_binfo(sci, &ssp, &binfo);
1727 else
1728 sc_op->write_node_binfo(sci, &ssp, &binfo);
1729
1730 blocknr++;
1731 if (--nblocks == 0) {
1732 finfo = NULL;
1733 if (--nfinfo == 0)
1734 break;
1735 } else if (ndatablk > 0)
1736 ndatablk--;
1737 }
1738 out:
1739 return 0;
1740
1741 failed_bmap:
1742 err = nilfs_handle_bmap_error(err, __func__, inode, sci->sc_super);
1743 return err;
1744}
1745
1746static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
1747{
1748 struct nilfs_segment_buffer *segbuf;
1749 int err;
1750
1751 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1752 err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
1753 if (unlikely(err))
1754 return err;
1755 nilfs_segbuf_fill_in_segsum(segbuf);
1756 }
1757 return 0;
1758}
1759
1760static int
1761nilfs_copy_replace_page_buffers(struct page *page, struct list_head *out)
1762{
1763 struct page *clone_page;
1764 struct buffer_head *bh, *head, *bh2;
1765 void *kaddr;
1766
1767 bh = head = page_buffers(page);
1768
1769 clone_page = nilfs_alloc_private_page(bh->b_bdev, bh->b_size, 0);
1770 if (unlikely(!clone_page))
1771 return -ENOMEM;
1772
1773 bh2 = page_buffers(clone_page);
1774 kaddr = kmap_atomic(page, KM_USER0);
1775 do {
1776 if (list_empty(&bh->b_assoc_buffers))
1777 continue;
1778 get_bh(bh2);
1779 page_cache_get(clone_page); /* for each bh */
1780 memcpy(bh2->b_data, kaddr + bh_offset(bh), bh2->b_size);
1781 bh2->b_blocknr = bh->b_blocknr;
1782 list_replace(&bh->b_assoc_buffers, &bh2->b_assoc_buffers);
1783 list_add_tail(&bh->b_assoc_buffers, out);
1784 } while (bh = bh->b_this_page, bh2 = bh2->b_this_page, bh != head);
1785 kunmap_atomic(kaddr, KM_USER0);
1786
1787 if (!TestSetPageWriteback(clone_page))
1788 inc_zone_page_state(clone_page, NR_WRITEBACK);
1789 unlock_page(clone_page);
1790
1791 return 0;
1792}
1793
1794static int nilfs_test_page_to_be_frozen(struct page *page)
1795{
1796 struct address_space *mapping = page->mapping;
1797
1798 if (!mapping || !mapping->host || S_ISDIR(mapping->host->i_mode))
1799 return 0;
1800
1801 if (page_mapped(page)) {
1802 ClearPageChecked(page);
1803 return 1;
1804 }
1805 return PageChecked(page);
1806}
1807
static int nilfs_begin_page_io(struct page *page, struct list_head *out)
{
	if (!page || PageWriteback(page))
		/* For split b-tree node pages, this function may be called
		   twice.  We ignore the 2nd or later calls by this check. */
		return 0;

	lock_page(page);
	clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);

	if (nilfs_test_page_to_be_frozen(page)) {
		int err = nilfs_copy_replace_page_buffers(page, out);
		if (unlikely(err))
			return err;
	}
	return 0;
}

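/*
 * Walk the segment summary buffers and payload buffers of every segment
 * buffer, switching each containing page from dirty to writeback state.
 * On failure the page being processed is returned through @failed_page
 * so that nilfs_segctor_abort_write() can roll back exactly as far as
 * the preparation got.
 */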
static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci,
				       struct page **failed_page)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct list_head *list = &sci->sc_copied_buffers;
	int err;

	*failed_page = NULL;
	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			if (bh->b_page != bd_page) {
				if (bd_page) {
					lock_page(bd_page);
					clear_page_dirty_for_io(bd_page);
					set_page_writeback(bd_page);
					unlock_page(bd_page);
				}
				bd_page = bh->b_page;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			if (bh == sci->sc_super_root) {
				if (bh->b_page != bd_page) {
					lock_page(bd_page);
					clear_page_dirty_for_io(bd_page);
					set_page_writeback(bd_page);
					unlock_page(bd_page);
					bd_page = bh->b_page;
				}
				break;
			}
			if (bh->b_page != fs_page) {
				err = nilfs_begin_page_io(fs_page, list);
				if (unlikely(err)) {
					*failed_page = fs_page;
					goto out;
				}
				fs_page = bh->b_page;
			}
		}
	}
	if (bd_page) {
		lock_page(bd_page);
		clear_page_dirty_for_io(bd_page);
		set_page_writeback(bd_page);
		unlock_page(bd_page);
	}
	err = nilfs_begin_page_io(fs_page, list);
	if (unlikely(err))
		*failed_page = fs_page;
 out:
	return err;
}

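/*
 * Submit and wait for the I/O of every segment buffer of the current
 * construction in turn.  The first error, whether from submission or
 * from completion, aborts the write.
 */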
static int nilfs_segctor_write(struct nilfs_sc_info *sci,
			       struct backing_dev_info *bdi)
{
	struct nilfs_segment_buffer *segbuf;
	struct nilfs_write_info wi;
	int err, res;

	wi.sb = sci->sc_super;
	wi.bh_sr = sci->sc_super_root;
	wi.bdi = bdi;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		nilfs_segbuf_prepare_write(segbuf, &wi);
		err = nilfs_segbuf_write(segbuf, &wi);

		res = nilfs_segbuf_wait(segbuf, &wi);
		err = unlikely(err) ? : res;
		if (unlikely(err))
			return err;
	}
	return 0;
}

static int nilfs_page_has_uncleared_buffer(struct page *page)
{
	struct buffer_head *head, *bh;

	head = bh = page_buffers(page);
	do {
		if (buffer_dirty(bh) && !list_empty(&bh->b_assoc_buffers))
			return 1;
		bh = bh->b_this_page;
	} while (bh != head);
	return 0;
}

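/*
 * Finish writeback of @page.  On success the page is re-dirtied only if
 * some of its buffers are still dirty; on failure the whole page is
 * re-dirtied and flagged with an error.  Privately allocated (cloned)
 * pages are not under regular page-cache writeback, so only the
 * writeback accounting is dropped for them.
 */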
static void __nilfs_end_page_io(struct page *page, int err)
{
	/* BUG_ON(err > 0); */
	if (!err) {
		if (!nilfs_page_buffers_clean(page))
			__set_page_dirty_nobuffers(page);
		ClearPageError(page);
	} else {
		__set_page_dirty_nobuffers(page);
		SetPageError(page);
	}

	if (buffer_nilfs_allocated(page_buffers(page))) {
		if (TestClearPageWriteback(page))
			dec_zone_page_state(page, NR_WRITEBACK);
	} else
		end_page_writeback(page);
}

static void nilfs_end_page_io(struct page *page, int err)
{
	if (!page)
		return;

	if (buffer_nilfs_node(page_buffers(page)) &&
	    nilfs_page_has_uncleared_buffer(page))
		/* For b-tree node pages, this function may be called twice
		   or more because they might be split in a segment.
		   This check assures that cleanup has been done for all
		   buffers in a split btnode page. */
		return;

	__nilfs_end_page_io(page, err);
}

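/*
 * Dispose of the buffer heads that were displaced by frozen copies in
 * nilfs_copy_replace_page_buffers().  On success each original buffer
 * is marked up to date and cleaned before the reference taken for the
 * copy is dropped, and writeback on the owning page is completed.
 */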
static void nilfs_clear_copied_buffers(struct list_head *list, int err)
{
	struct buffer_head *bh, *head;
	struct page *page;

	while (!list_empty(list)) {
		bh = list_entry(list->next, struct buffer_head,
				b_assoc_buffers);
		page = bh->b_page;
		page_cache_get(page);
		head = bh = page_buffers(page);
		do {
			if (!list_empty(&bh->b_assoc_buffers)) {
				list_del_init(&bh->b_assoc_buffers);
				if (!err) {
					set_buffer_uptodate(bh);
					clear_buffer_dirty(bh);
					clear_buffer_nilfs_volatile(bh);
				}
				brelse(bh); /* for b_assoc_buffers */
			}
		} while ((bh = bh->b_this_page) != head);

		__nilfs_end_page_io(page, err);
		page_cache_release(page);
	}
}

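/*
 * Roll back a failed write: end writeback on every page that
 * nilfs_segctor_prepare_write() had switched into writeback state, up
 * to and including @failed_page, and release the copied buffers with
 * the error code so that the affected pages are re-dirtied.
 */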
static void nilfs_segctor_abort_write(struct nilfs_sc_info *sci,
				      struct page *failed_page, int err)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			if (bh->b_page != bd_page) {
				if (bd_page)
					end_page_writeback(bd_page);
				bd_page = bh->b_page;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			if (bh == sci->sc_super_root) {
				if (bh->b_page != bd_page) {
					end_page_writeback(bd_page);
					bd_page = bh->b_page;
				}
				break;
			}
			if (bh->b_page != fs_page) {
				nilfs_end_page_io(fs_page, err);
				if (unlikely(fs_page == failed_page))
					goto done;
				fs_page = bh->b_page;
			}
		}
	}
	if (bd_page)
		end_page_writeback(bd_page);

	nilfs_end_page_io(fs_page, err);
 done:
	nilfs_clear_copied_buffers(&sci->sc_copied_buffers, err);
}

static void nilfs_set_next_segment(struct the_nilfs *nilfs,
				   struct nilfs_segment_buffer *segbuf)
{
	nilfs->ns_segnum = segbuf->sb_segnum;
	nilfs->ns_nextnum = segbuf->sb_nextnum;
	nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
		+ segbuf->sb_sum.nblocks;
	nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
	nilfs->ns_ctime = segbuf->sb_sum.ctime;
}

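/*
 * Finalize a successfully written log: mark every written buffer clean
 * and up to date, end writeback on the containing pages, drop the
 * collected inodes, record the position of the next segment in
 * the_nilfs, and adjust the NILFS_SC_* state flags according to whether
 * a super root was written.
 */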
static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;
	int update_sr = (sci->sc_super_root != NULL);

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
			if (bh->b_page != bd_page) {
				if (bd_page)
					end_page_writeback(bd_page);
				bd_page = bh->b_page;
			}
		}
		/*
		 * We assume that the buffers which belong to the same page
		 * continue over the buffer list.
		 * Under this assumption, the last BH of each page is
		 * identifiable by the discontinuity of bh->b_page
		 * (page != fs_page).
		 *
		 * For B-tree node blocks, however, this assumption is not
		 * guaranteed.  The cleanup code of B-tree node pages needs
		 * special care.
		 */
		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
			clear_buffer_nilfs_volatile(bh);
			if (bh == sci->sc_super_root) {
				if (bh->b_page != bd_page) {
					end_page_writeback(bd_page);
					bd_page = bh->b_page;
				}
				break;
			}
			if (bh->b_page != fs_page) {
				nilfs_end_page_io(fs_page, 0);
				fs_page = bh->b_page;
			}
		}

		if (!NILFS_SEG_SIMPLEX(&segbuf->sb_sum)) {
			if (NILFS_SEG_LOGBGN(&segbuf->sb_sum)) {
				set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
				sci->sc_lseg_stime = jiffies;
			}
			if (NILFS_SEG_LOGEND(&segbuf->sb_sum))
				clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
		}
	}
	/*
	 * Since pages may continue over multiple segment buffers,
	 * end of the last page must be checked outside of the loop.
	 */
	if (bd_page)
		end_page_writeback(bd_page);

	nilfs_end_page_io(fs_page, 0);

	nilfs_clear_copied_buffers(&sci->sc_copied_buffers, 0);

	nilfs_drop_collected_inodes(&sci->sc_dirty_files);

	if (nilfs_doing_gc()) {
		nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
		if (update_sr)
			nilfs_commit_gcdat_inode(nilfs);
	} else
		nilfs->ns_nongc_ctime = sci->sc_seg_ctime;

	sci->sc_nblk_inc += sci->sc_nblk_this_inc;

	segbuf = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
	nilfs_set_next_segment(nilfs, segbuf);

	if (update_sr) {
		nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
				       segbuf->sb_sum.seg_seq, nilfs->ns_cno);

		clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
		set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
	} else
		clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
}

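/*
 * Transfer every inode on the per-superblock dirty list to the segment
 * constructor's own list, loading the on-disk inode block of each one
 * beforehand and stamping it with the current checkpoint number.
 */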
static int nilfs_segctor_check_in_files(struct nilfs_sc_info *sci,
					struct nilfs_sb_info *sbi)
{
	struct nilfs_inode_info *ii, *n;
	__u64 cno = sbi->s_nilfs->ns_cno;

	spin_lock(&sbi->s_inode_lock);
 retry:
	list_for_each_entry_safe(ii, n, &sbi->s_dirty_files, i_dirty) {
		if (!ii->i_bh) {
			struct buffer_head *ibh;
			int err;

			spin_unlock(&sbi->s_inode_lock);
			err = nilfs_ifile_get_inode_block(
				sbi->s_ifile, ii->vfs_inode.i_ino, &ibh);
			if (unlikely(err)) {
				nilfs_warning(sbi->s_super, __func__,
					      "failed to get inode block.\n");
				return err;
			}
			nilfs_mdt_mark_buffer_dirty(ibh);
			nilfs_mdt_mark_dirty(sbi->s_ifile);
			spin_lock(&sbi->s_inode_lock);
			if (likely(!ii->i_bh))
				ii->i_bh = ibh;
			else
				brelse(ibh);
			goto retry;
		}
		ii->i_cno = cno;

		clear_bit(NILFS_I_QUEUED, &ii->i_state);
		set_bit(NILFS_I_BUSY, &ii->i_state);
		list_del(&ii->i_dirty);
		list_add_tail(&ii->i_dirty, &sci->sc_dirty_files);
	}
	spin_unlock(&sbi->s_inode_lock);

	NILFS_I(sbi->s_ifile)->i_cno = cno;

	return 0;
}

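/*
 * The counterpart of nilfs_segctor_check_in_files(): after construction,
 * inodes whose blocks were fully written are released from the
 * constructor's dirty list onto the transaction's garbage list, while
 * inodes that became dirty again keep their place and merely get their
 * checkpoint number refreshed.
 */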
static void nilfs_segctor_check_out_files(struct nilfs_sc_info *sci,
					  struct nilfs_sb_info *sbi)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct nilfs_inode_info *ii, *n;
	__u64 cno = sbi->s_nilfs->ns_cno;

	spin_lock(&sbi->s_inode_lock);
	list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
		    test_bit(NILFS_I_DIRTY, &ii->i_state)) {
			/* The current checkpoint number (=nilfs->ns_cno) is
			   changed between check-in and check-out only if the
			   super root is written out.  So, we can update i_cno
			   for the inodes that remain in the dirty list. */
			ii->i_cno = cno;
			continue;
		}
		clear_bit(NILFS_I_BUSY, &ii->i_state);
		brelse(ii->i_bh);
		ii->i_bh = NULL;
		list_del(&ii->i_dirty);
		list_add_tail(&ii->i_dirty, &ti->ti_garbage);
	}
	spin_unlock(&sbi->s_inode_lock);
}

/*
 * Nasty routines to manipulate active flags on sufile.
 * These would be removed in a future release.
 */
static void nilfs_segctor_reactivate_segments(struct nilfs_sc_info *sci,
					      struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *last;
	struct nilfs_segment_entry *ent, *n;
	struct inode *sufile = nilfs->ns_sufile;
	struct list_head *head;

	last = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
	nilfs_for_each_segbuf_before(segbuf, last, &sci->sc_segbufs) {
		ent = segbuf->sb_segent;
		if (!ent)
			break; /* ignore unmapped segments (should check it?) */
		nilfs_segment_usage_set_active(ent->raw_su);
		nilfs_close_segment_entry(ent, sufile);
	}

	head = &sci->sc_active_segments;
	list_for_each_entry_safe(ent, n, head, list) {
		nilfs_segment_usage_set_active(ent->raw_su);
		nilfs_close_segment_entry(ent, sufile);
	}
}

static int nilfs_segctor_deactivate_segments(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *last;
	struct nilfs_segment_entry *ent;
	struct inode *sufile = nilfs->ns_sufile;
	int err;

	last = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
	nilfs_for_each_segbuf_before(segbuf, last, &sci->sc_segbufs) {
		/*
		 * Deactivate ongoing full segments.  The last segment is kept
		 * active because it is a start point of recovery, and is not
		 * relocatable until the super block points to a newer
		 * checkpoint.
		 */
		ent = segbuf->sb_segent;
		if (!ent)
			break; /* ignore unmapped segments (should check it?) */
		err = nilfs_open_segment_entry(ent, sufile);
		if (unlikely(err))
			goto failed;
		nilfs_segment_usage_clear_active(ent->raw_su);
		BUG_ON(!buffer_dirty(ent->bh_su));
	}

	list_for_each_entry(ent, &sci->sc_active_segments, list) {
		err = nilfs_open_segment_entry(ent, sufile);
		if (unlikely(err))
			goto failed;
		nilfs_segment_usage_clear_active(ent->raw_su);
		BUG_ON(!buffer_dirty(ent->bh_su));
	}
	return 0;

 failed:
	nilfs_segctor_reactivate_segments(sci, nilfs);
	return err;
}

static void nilfs_segctor_bead_completed_segments(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf, *last;
	struct nilfs_segment_entry *ent;

	/* move each segbuf->sb_segent to the list of used active segments */
	last = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
	nilfs_for_each_segbuf_before(segbuf, last, &sci->sc_segbufs) {
		ent = segbuf->sb_segent;
		if (!ent)
			break; /* ignore unmapped segments (should check it?) */
		list_add_tail(&ent->list, &sci->sc_active_segments);
		segbuf->sb_segent = NULL;
	}
}

static void nilfs_segctor_commit_deactivate_segments(struct nilfs_sc_info *sci,
						     struct the_nilfs *nilfs)
{
	struct nilfs_segment_entry *ent, *n;

	list_for_each_entry_safe(ent, n, &sci->sc_active_segments, list) {
		list_del(&ent->list);
		nilfs_close_segment_entry(ent, nilfs->ns_sufile);
		nilfs_free_segment_entry(ent);
	}
}

/*
 * Main procedure of segment constructor
 */
static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct page *failed_page;
	int err, has_sr = 0;

	sci->sc_stage.scnt = NILFS_ST_INIT;

	err = nilfs_segctor_check_in_files(sci, sbi);
	if (unlikely(err))
		goto out;

	if (nilfs_test_metadata_dirty(sbi))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	if (nilfs_segctor_clean(sci))
		goto out;

	do {
		sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;

		err = nilfs_segctor_begin_construction(sci, nilfs);
		if (unlikely(err))
			goto out;

		/* Update time stamp */
		sci->sc_seg_ctime = get_seconds();

		err = nilfs_segctor_collect(sci, nilfs, mode);
		if (unlikely(err))
			goto failed;

		has_sr = (sci->sc_super_root != NULL);

		/* Avoid empty segment */
		if (sci->sc_stage.scnt == NILFS_ST_DONE &&
		    NILFS_SEG_EMPTY(&sci->sc_curseg->sb_sum)) {
			BUG_ON(mode == SC_LSEG_SR);
			nilfs_segctor_end_construction(sci, nilfs, 1);
			goto out;
		}

		err = nilfs_segctor_assign(sci, mode);
		if (unlikely(err))
			goto failed;

		if (has_sr) {
			err = nilfs_segctor_deactivate_segments(sci, nilfs);
			if (unlikely(err))
				goto failed;
		}
		if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
			nilfs_segctor_fill_in_file_bmap(sci, sbi->s_ifile);

		if (has_sr) {
			err = nilfs_segctor_fill_in_checkpoint(sci);
			if (unlikely(err))
				goto failed_to_make_up;

			nilfs_segctor_fill_in_super_root(sci, nilfs);
		}
		nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);

		/* Write partial segments */
		err = nilfs_segctor_prepare_write(sci, &failed_page);
		if (unlikely(err))
			goto failed_to_write;

		nilfs_segctor_fill_in_checksums(sci, nilfs->ns_crc_seed);

		err = nilfs_segctor_write(sci, nilfs->ns_bdi);
		if (unlikely(err))
			goto failed_to_write;

		nilfs_segctor_complete_write(sci);

		/* Commit segments */
		nilfs_segctor_bead_completed_segments(sci);
		if (has_sr) {
			down_write(&nilfs->ns_sem);
			nilfs_update_last_segment(sbi, 1);
			up_write(&nilfs->ns_sem);
			nilfs_segctor_commit_deactivate_segments(sci, nilfs);
			nilfs_segctor_commit_free_segments(sci);
			nilfs_segctor_clear_metadata_dirty(sci);
		}

		nilfs_segctor_end_construction(sci, nilfs, 0);

	} while (sci->sc_stage.scnt != NILFS_ST_DONE);

	/* Clearing sketch data */
	if (has_sr && sci->sc_sketch_inode) {
		if (i_size_read(sci->sc_sketch_inode) == 0)
			clear_bit(NILFS_I_DIRTY,
				  &NILFS_I(sci->sc_sketch_inode)->i_state);
		i_size_write(sci->sc_sketch_inode, 0);
	}
 out:
	nilfs_segctor_destroy_segment_buffers(sci);
	nilfs_segctor_check_out_files(sci, sbi);
	return err;

 failed_to_write:
	nilfs_segctor_abort_write(sci, failed_page, err);
	nilfs_segctor_cancel_segusage(sci, nilfs->ns_sufile);

 failed_to_make_up:
	if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
		nilfs_redirty_inodes(&sci->sc_dirty_files);
	if (has_sr)
		nilfs_segctor_reactivate_segments(sci, nilfs);

 failed:
	if (nilfs_doing_gc())
		nilfs_redirty_inodes(&sci->sc_gc_inodes);
	nilfs_segctor_end_construction(sci, nilfs, err);
	goto out;
}

/**
 * nilfs_segctor_start_timer - set timer of background write
 * @sci: nilfs_sc_info
 *
 * If the timer has already been set, it ignores the new request.
 * This function MUST be called within a section locking the segment
 * semaphore.
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
{
	spin_lock(&sci->sc_state_lock);
	if (sci->sc_timer && !(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
		sci->sc_timer->expires = jiffies + sci->sc_interval;
		add_timer(sci->sc_timer);
		sci->sc_state |= NILFS_SEGCTOR_COMMIT;
	}
	spin_unlock(&sci->sc_state_lock);
}

static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_flush_request & (1 << bn))) {
		unsigned long prev_req = sci->sc_flush_request;

		sci->sc_flush_request |= (1 << bn);
		if (!prev_req)
			wake_up(&sci->sc_wait_daemon);
	}
	spin_unlock(&sci->sc_state_lock);
}

/**
 * nilfs_flush_segment - trigger a segment construction for resource control
 * @sb: super block
 * @ino: inode number of the file to be flushed out.
 */
void nilfs_flush_segment(struct super_block *sb, ino_t ino)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);

	if (!sci || nilfs_doing_construction())
		return;
	nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
					/* assign bit 0 to data files */
}

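/*
 * Register the segments listed in @segnum for cleaning.  Every segment
 * must be in the plain dirty state; a segment that is clean, active, or
 * otherwise unexpected is reported and makes the whole request fail
 * with -EINVAL.
 */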
int nilfs_segctor_add_segments_to_be_freed(struct nilfs_sc_info *sci,
					   __u64 *segnum, size_t nsegs)
{
	struct nilfs_segment_entry *ent;
	struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
	struct inode *sufile = nilfs->ns_sufile;
	LIST_HEAD(list);
	__u64 *pnum;
	const char *flag_name;
	size_t i;
	int err, err2 = 0;

	for (pnum = segnum, i = 0; i < nsegs; pnum++, i++) {
		ent = nilfs_alloc_segment_entry(*pnum);
		if (unlikely(!ent)) {
			err = -ENOMEM;
			goto failed;
		}
		list_add_tail(&ent->list, &list);

		err = nilfs_open_segment_entry(ent, sufile);
		if (unlikely(err))
			goto failed;

		if (unlikely(le32_to_cpu(ent->raw_su->su_flags) !=
			     (1UL << NILFS_SEGMENT_USAGE_DIRTY))) {
			if (nilfs_segment_usage_clean(ent->raw_su))
				flag_name = "clean";
			else if (nilfs_segment_usage_active(ent->raw_su))
				flag_name = "active";
			else if (nilfs_segment_usage_volatile_active(
					 ent->raw_su))
				flag_name = "volatile active";
			else if (!nilfs_segment_usage_dirty(ent->raw_su))
				flag_name = "non-dirty";
			else
				flag_name = "erroneous";

			printk(KERN_ERR
			       "NILFS: %s segment is requested to be cleaned "
			       "(segnum=%llu)\n",
			       flag_name, (unsigned long long)ent->segnum);
			err2 = -EINVAL;
		}
		nilfs_close_segment_entry(ent, sufile);
	}
	if (unlikely(err2)) {
		err = err2;
		goto failed;
	}
	list_splice(&list, sci->sc_cleaning_segments.prev);
	return 0;

 failed:
	nilfs_dispose_segment_list(&list);
	return err;
}

void nilfs_segctor_clear_segments_to_be_freed(struct nilfs_sc_info *sci)
{
	nilfs_dispose_segment_list(&sci->sc_cleaning_segments);
}

struct nilfs_segctor_wait_request {
	wait_queue_t wq;
	__u32 seq;
	int err;
	atomic_t done;
};

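/*
 * Queue a construction request and sleep until the segment constructor
 * thread has completed a log covering it, or until a signal arrives.
 */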
static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
{
	struct nilfs_segctor_wait_request wait_req;
	int err = 0;

	spin_lock(&sci->sc_state_lock);
	init_wait(&wait_req.wq);
	wait_req.err = 0;
	atomic_set(&wait_req.done, 0);
	wait_req.seq = ++sci->sc_seq_request;
	spin_unlock(&sci->sc_state_lock);

	init_waitqueue_entry(&wait_req.wq, current);
	add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
	set_current_state(TASK_INTERRUPTIBLE);
	wake_up(&sci->sc_wait_daemon);

	for (;;) {
		if (atomic_read(&wait_req.done)) {
			err = wait_req.err;
			break;
		}
		if (!signal_pending(current)) {
			schedule();
			continue;
		}
		err = -ERESTARTSYS;
		break;
	}
	finish_wait(&sci->sc_wait_request, &wait_req.wq);
	return err;
}

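/*
 * Wake up the tasks sleeping in nilfs_segctor_sync() whose request
 * sequence numbers have been reached by sc_seq_done, handing each one
 * the result of the construction.
 */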
static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
{
	struct nilfs_segctor_wait_request *wrq, *n;
	unsigned long flags;

	spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
	list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.task_list,
				 wq.task_list) {
		if (!atomic_read(&wrq->done) &&
		    nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
			wrq->err = err;
			atomic_set(&wrq->done, 1);
		}
		if (atomic_read(&wrq->done)) {
			wrq->wq.func(&wrq->wq,
				     TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
				     0, NULL);
		}
	}
	spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
}

/**
 * nilfs_construct_segment - construct a logical segment
 * @sb: super block
 *
 * Return Value: On success, 0 is returned.  On errors, one of the following
 * negative error codes is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_segment(struct super_block *sb)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);
	struct nilfs_transaction_info *ti;
	int err;

	if (!sci)
		return -EROFS;

	/* A call inside transactions causes a deadlock. */
	BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);

	err = nilfs_segctor_sync(sci);
	return err;
}

/**
 * nilfs_construct_dsync_segment - construct a data-only logical segment
 * @sb: super block
 * @inode: inode whose data blocks should be written out
 * @start: start byte offset
 * @end: end byte offset (inclusive)
 *
 * Return Value: On success, 0 is returned.  On errors, one of the following
 * negative error codes is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
				  loff_t start, loff_t end)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);
	struct nilfs_inode_info *ii;
	struct nilfs_transaction_info ti;
	int err = 0;

	if (!sci)
		return -EROFS;

	nilfs_transaction_lock(sbi, &ti, 0);

	ii = NILFS_I(inode);
	if (test_bit(NILFS_I_INODE_DIRTY, &ii->i_state) ||
	    nilfs_test_opt(sbi, STRICT_ORDER) ||
	    test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    nilfs_discontinued(sbi->s_nilfs)) {
		nilfs_transaction_unlock(sbi);
		err = nilfs_segctor_sync(sci);
		return err;
	}

	spin_lock(&sbi->s_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		spin_unlock(&sbi->s_inode_lock);
		nilfs_transaction_unlock(sbi);
		return 0;
	}
	spin_unlock(&sbi->s_inode_lock);
	sci->sc_dsync_inode = ii;
	sci->sc_dsync_start = start;
	sci->sc_dsync_end = end;

	err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);

	nilfs_transaction_unlock(sbi);
	return err;
}

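/*
 * Example (a hedged sketch, not taken from this file): an fsync-style
 * caller would typically flush a byte range of a single inode like
 *
 *	err = nilfs_construct_dsync_segment(inode->i_sb, inode,
 *					    0, i_size_read(inode));
 *
 * and fall back to nilfs_construct_segment(inode->i_sb) when the whole
 * logical segment has to be closed.
 */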
struct nilfs_segctor_req {
	int mode;
	__u32 seq_accepted;
	int sc_err;	/* construction failure */
	int sb_err;	/* super block writeback failure */
};

#define FLUSH_FILE_BIT	(0x1)	/* data file only */
#define FLUSH_DAT_BIT	(1 << NILFS_DAT_INO) /* DAT only */

static void nilfs_segctor_accept(struct nilfs_sc_info *sci,
				 struct nilfs_segctor_req *req)
{
	BUG_ON(!sci);

	req->sc_err = req->sb_err = 0;
	spin_lock(&sci->sc_state_lock);
	req->seq_accepted = sci->sc_seq_request;
	spin_unlock(&sci->sc_state_lock);

	if (sci->sc_timer)
		del_timer_sync(sci->sc_timer);
}

static void nilfs_segctor_notify(struct nilfs_sc_info *sci,
				 struct nilfs_segctor_req *req)
{
	/* Clear requests (even when the construction failed) */
	spin_lock(&sci->sc_state_lock);

	sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;

	if (req->mode == SC_LSEG_SR) {
		sci->sc_seq_done = req->seq_accepted;
		nilfs_segctor_wakeup(sci, req->sc_err ? : req->sb_err);
		sci->sc_flush_request = 0;
	} else if (req->mode == SC_FLUSH_FILE)
		sci->sc_flush_request &= ~FLUSH_FILE_BIT;
	else if (req->mode == SC_FLUSH_DAT)
		sci->sc_flush_request &= ~FLUSH_DAT_BIT;

	spin_unlock(&sci->sc_state_lock);
}

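/*
 * Run one construction for the accepted request.  A discontinued log
 * chain forces a full construction with a super root; when such a
 * construction has written a new super root, the super block is
 * committed as well.
 */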
static int nilfs_segctor_construct(struct nilfs_sc_info *sci,
				   struct nilfs_segctor_req *req)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;
	int err = 0;

	if (nilfs_discontinued(nilfs))
		req->mode = SC_LSEG_SR;
	if (!nilfs_segctor_confirm(sci)) {
		err = nilfs_segctor_do_construct(sci, req->mode);
		req->sc_err = err;
	}
	if (likely(!err)) {
		if (req->mode != SC_FLUSH_DAT)
			atomic_set(&nilfs->ns_ndirtyblks, 0);
		if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
		    nilfs_discontinued(nilfs)) {
			down_write(&nilfs->ns_sem);
			req->sb_err = nilfs_commit_super(sbi);
			up_write(&nilfs->ns_sem);
		}
	}
	return err;
}

static void nilfs_construction_timeout(unsigned long data)
{
	struct task_struct *p = (struct task_struct *)data;
	wake_up_process(p);
}

static void
nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
{
	struct nilfs_inode_info *ii, *n;

	list_for_each_entry_safe(ii, n, head, i_dirty) {
		if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
			continue;
		hlist_del_init(&ii->vfs_inode.i_hash);
		list_del_init(&ii->i_dirty);
		nilfs_clear_gcinode(&ii->vfs_inode);
	}
}

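/**
 * nilfs_clean_segments - run one garbage collection pass
 * @sb: super block
 * @argp: pointer to the ioctl data, handed to
 *	nilfs_ioctl_prepare_clean_segments()
 *
 * Prepares the GC inodes and the GC DAT from the user-supplied data,
 * then repeats super-root constructions, sleeping sc_interval between
 * attempts on failure, until the collected blocks have been relocated
 * to new logs.
 */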
int nilfs_clean_segments(struct super_block *sb, void __user *argp)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct nilfs_transaction_info ti;
	struct nilfs_segctor_req req = { .mode = SC_LSEG_SR };
	int err;

	if (unlikely(!sci))
		return -EROFS;

	nilfs_transaction_lock(sbi, &ti, 1);

	err = nilfs_init_gcdat_inode(nilfs);
	if (unlikely(err))
		goto out_unlock;
	err = nilfs_ioctl_prepare_clean_segments(nilfs, argp);
	if (unlikely(err))
		goto out_unlock;

	list_splice_init(&nilfs->ns_gc_inodes, sci->sc_gc_inodes.prev);

	for (;;) {
		nilfs_segctor_accept(sci, &req);
		err = nilfs_segctor_construct(sci, &req);
		nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);
		nilfs_segctor_notify(sci, &req);

		if (likely(!err))
			break;

		nilfs_warning(sb, __func__,
			      "segment construction failed. (err=%d)", err);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(sci->sc_interval);
	}

 out_unlock:
	nilfs_clear_gcdat_inode(nilfs);
	nilfs_transaction_unlock(sbi);
	return err;
}

static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct nilfs_transaction_info ti;
	struct nilfs_segctor_req req = { .mode = mode };

	nilfs_transaction_lock(sbi, &ti, 0);

	nilfs_segctor_accept(sci, &req);
	nilfs_segctor_construct(sci, &req);
	nilfs_segctor_notify(sci, &req);

	/*
	 * Unclosed segment should be retried.  We do this using sc_timer.
	 * Timeout of sc_timer will invoke complete construction which leads
	 * to close the current logical segment.
	 */
	if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
		nilfs_segctor_start_timer(sci);

	nilfs_transaction_unlock(sbi);
}

static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
{
	int mode = 0;
	int err;

	spin_lock(&sci->sc_state_lock);
	mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
		SC_FLUSH_DAT : SC_FLUSH_FILE;
	spin_unlock(&sci->sc_state_lock);

	if (mode) {
		err = nilfs_segctor_do_construct(sci, mode);

		spin_lock(&sci->sc_state_lock);
		sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
			~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
		spin_unlock(&sci->sc_state_lock);
	}
	clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
}

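/*
 * Choose the construction mode for a pending flush request: flush only
 * data files or only the DAT while the current logical segment is still
 * young, and fall back to a full construction with a super root once
 * the segment has stayed unclosed longer than the major checkpoint
 * frequency.
 */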
static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
{
	if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
		if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
			return SC_FLUSH_FILE;
		else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
			return SC_FLUSH_DAT;
	}
	return SC_LSEG_SR;
}

/**
 * nilfs_segctor_thread - main loop of the segment constructor thread.
 * @arg: pointer to a struct nilfs_sc_info.
 *
 * nilfs_segctor_thread() initializes a timer and serves as a daemon
 * to execute segment constructions.
 */
static int nilfs_segctor_thread(void *arg)
{
	struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
	struct timer_list timer;
	int timeout = 0;

	init_timer(&timer);
	timer.data = (unsigned long)current;
	timer.function = nilfs_construction_timeout;
	sci->sc_timer = &timer;

	/* start sync. */
	sci->sc_task = current;
	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
	printk(KERN_INFO
	       "segctord starting. Construction interval = %lu seconds, "
	       "CP frequency < %lu seconds\n",
	       sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);

	spin_lock(&sci->sc_state_lock);
 loop:
	for (;;) {
		int mode;

		if (sci->sc_state & NILFS_SEGCTOR_QUIT)
			goto end_thread;

		if (timeout || sci->sc_seq_request != sci->sc_seq_done)
			mode = SC_LSEG_SR;
		else if (!sci->sc_flush_request)
			break;
		else
			mode = nilfs_segctor_flush_mode(sci);

		spin_unlock(&sci->sc_state_lock);
		nilfs_segctor_thread_construct(sci, mode);
		spin_lock(&sci->sc_state_lock);
		timeout = 0;
	}

	if (freezing(current)) {
		spin_unlock(&sci->sc_state_lock);
		refrigerator();
		spin_lock(&sci->sc_state_lock);
	} else {
		DEFINE_WAIT(wait);
		int should_sleep = 1;

		prepare_to_wait(&sci->sc_wait_daemon, &wait,
				TASK_INTERRUPTIBLE);

		if (sci->sc_seq_request != sci->sc_seq_done)
			should_sleep = 0;
		else if (sci->sc_flush_request)
			should_sleep = 0;
		else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
			should_sleep = time_before(jiffies,
						   sci->sc_timer->expires);

		if (should_sleep) {
			spin_unlock(&sci->sc_state_lock);
			schedule();
			spin_lock(&sci->sc_state_lock);
		}
		finish_wait(&sci->sc_wait_daemon, &wait);
		timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
			   time_after_eq(jiffies, sci->sc_timer->expires));
	}
	goto loop;

 end_thread:
	spin_unlock(&sci->sc_state_lock);
	del_timer_sync(sci->sc_timer);
	sci->sc_timer = NULL;

	/* end sync. */
	sci->sc_task = NULL;
	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
	return 0;
}

static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
{
	struct task_struct *t;

	t = kthread_run(nilfs_segctor_thread, sci, "segctord");
	if (IS_ERR(t)) {
		int err = PTR_ERR(t);

		printk(KERN_ERR "NILFS: error %d creating segctord thread\n",
		       err);
		return err;
	}
	wait_event(sci->sc_wait_task, sci->sc_task != NULL);
	return 0;
}

static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
{
	sci->sc_state |= NILFS_SEGCTOR_QUIT;

	while (sci->sc_task) {
		wake_up(&sci->sc_wait_daemon);
		spin_unlock(&sci->sc_state_lock);
		wait_event(sci->sc_wait_task, sci->sc_task == NULL);
		spin_lock(&sci->sc_state_lock);
	}
}

static int nilfs_segctor_init(struct nilfs_sc_info *sci,
			      struct nilfs_recovery_info *ri)
{
	int err;
	struct inode *inode = nilfs_iget(sci->sc_super, NILFS_SKETCH_INO);

	sci->sc_sketch_inode = IS_ERR(inode) ? NULL : inode;
	if (sci->sc_sketch_inode)
		i_size_write(sci->sc_sketch_inode, 0);

	sci->sc_seq_done = sci->sc_seq_request;
	if (ri)
		list_splice_init(&ri->ri_used_segments,
				 sci->sc_active_segments.prev);

	err = nilfs_segctor_start_thread(sci);
	if (err) {
		if (ri)
			list_splice_init(&sci->sc_active_segments,
					 ri->ri_used_segments.prev);
		if (sci->sc_sketch_inode) {
			iput(sci->sc_sketch_inode);
			sci->sc_sketch_inode = NULL;
		}
	}
	return err;
}

/*
 * Setup & clean-up functions
 */
static struct nilfs_sc_info *nilfs_segctor_new(struct nilfs_sb_info *sbi)
{
	struct nilfs_sc_info *sci;

	sci = kzalloc(sizeof(*sci), GFP_KERNEL);
	if (!sci)
		return NULL;

	sci->sc_sbi = sbi;
	sci->sc_super = sbi->s_super;

	init_waitqueue_head(&sci->sc_wait_request);
	init_waitqueue_head(&sci->sc_wait_daemon);
	init_waitqueue_head(&sci->sc_wait_task);
	spin_lock_init(&sci->sc_state_lock);
	INIT_LIST_HEAD(&sci->sc_dirty_files);
	INIT_LIST_HEAD(&sci->sc_segbufs);
	INIT_LIST_HEAD(&sci->sc_gc_inodes);
	INIT_LIST_HEAD(&sci->sc_active_segments);
	INIT_LIST_HEAD(&sci->sc_cleaning_segments);
	INIT_LIST_HEAD(&sci->sc_copied_buffers);

	sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
	sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
	sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;

	if (sbi->s_interval)
		sci->sc_interval = sbi->s_interval;
	if (sbi->s_watermark)
		sci->sc_watermark = sbi->s_watermark;
	return sci;
}

static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
{
	int ret, retrycount = NILFS_SC_CLEANUP_RETRY;

	/* The segctord thread was stopped and its timer was removed.
	   But some tasks remain. */
	do {
		struct nilfs_sb_info *sbi = sci->sc_sbi;
		struct nilfs_transaction_info ti;
		struct nilfs_segctor_req req = { .mode = SC_LSEG_SR };

		nilfs_transaction_lock(sbi, &ti, 0);
		nilfs_segctor_accept(sci, &req);
		ret = nilfs_segctor_construct(sci, &req);
		nilfs_segctor_notify(sci, &req);
		nilfs_transaction_unlock(sbi);

	} while (ret && retrycount-- > 0);
}

/**
 * nilfs_segctor_destroy - destroy the segment constructor.
 * @sci: nilfs_sc_info
 *
 * nilfs_segctor_destroy() kills the segctord thread and frees
 * the nilfs_sc_info struct.
 * Caller must hold the segment semaphore.
 */
static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	int flag;

	up_write(&sbi->s_nilfs->ns_segctor_sem);

	spin_lock(&sci->sc_state_lock);
	nilfs_segctor_kill_thread(sci);
	flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
		|| sci->sc_seq_request != sci->sc_seq_done);
	spin_unlock(&sci->sc_state_lock);

	if (flag || nilfs_segctor_confirm(sci))
		nilfs_segctor_write_out(sci);

	BUG_ON(!list_empty(&sci->sc_copied_buffers));

	if (!list_empty(&sci->sc_dirty_files)) {
		nilfs_warning(sbi->s_super, __func__,
			      "dirty file(s) after the final construction\n");
		nilfs_dispose_list(sbi, &sci->sc_dirty_files, 1);
	}
	if (!list_empty(&sci->sc_active_segments))
		nilfs_dispose_segment_list(&sci->sc_active_segments);

	if (!list_empty(&sci->sc_cleaning_segments))
		nilfs_dispose_segment_list(&sci->sc_cleaning_segments);

	BUG_ON(!list_empty(&sci->sc_segbufs));

	if (sci->sc_sketch_inode) {
		iput(sci->sc_sketch_inode);
		sci->sc_sketch_inode = NULL;
	}
	down_write(&sbi->s_nilfs->ns_segctor_sem);

	kfree(sci);
}

/**
 * nilfs_attach_segment_constructor - attach a segment constructor
 * @sbi: nilfs_sb_info
 * @ri: nilfs_recovery_info
 *
 * nilfs_attach_segment_constructor() allocates a struct nilfs_sc_info,
 * initializes it, and starts the segment constructor.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_attach_segment_constructor(struct nilfs_sb_info *sbi,
				     struct nilfs_recovery_info *ri)
{
	struct the_nilfs *nilfs = sbi->s_nilfs;
	int err;

	/* Each field of nilfs_segctor is cleared through the initialization
	   of super-block info */
	sbi->s_sc_info = nilfs_segctor_new(sbi);
	if (!sbi->s_sc_info)
		return -ENOMEM;

	nilfs_attach_writer(nilfs, sbi);
	err = nilfs_segctor_init(NILFS_SC(sbi), ri);
	if (err) {
		nilfs_detach_writer(nilfs, sbi);
		kfree(sbi->s_sc_info);
		sbi->s_sc_info = NULL;
	}
	return err;
}

/**
 * nilfs_detach_segment_constructor - destroy the segment constructor
 * @sbi: nilfs_sb_info
 *
 * nilfs_detach_segment_constructor() kills the segment constructor daemon,
 * frees the struct nilfs_sc_info, and destroys the dirty file list.
 */
void nilfs_detach_segment_constructor(struct nilfs_sb_info *sbi)
{
	struct the_nilfs *nilfs = sbi->s_nilfs;
	LIST_HEAD(garbage_list);

	down_write(&nilfs->ns_segctor_sem);
	if (NILFS_SC(sbi)) {
		nilfs_segctor_destroy(NILFS_SC(sbi));
		sbi->s_sc_info = NULL;
	}

	/* Force to free the list of dirty files */
	spin_lock(&sbi->s_inode_lock);
	if (!list_empty(&sbi->s_dirty_files)) {
		list_splice_init(&sbi->s_dirty_files, &garbage_list);
		nilfs_warning(sbi->s_super, __func__,
			      "Non-empty dirty list after the last "
			      "segment construction\n");
	}
	spin_unlock(&sbi->s_inode_lock);
	up_write(&nilfs->ns_segctor_sem);

	nilfs_dispose_list(sbi, &garbage_list, 1);
	nilfs_detach_writer(nilfs, sbi);
}