/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/blkdev.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

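/*
 * Slab cache for the inode_entry nodes that track inodes whose data
 * pages are moved during GC (see add_gc_inode()/put_gc_inode() below).
 */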
static struct kmem_cache *winode_slab;

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = gc_th->min_sleep_time;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
						kthread_should_stop(),
						msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too frequently,
		 * because some segments may be invalidated soon afterwards
		 * by user updates or deletions. So we wait a while to
		 * collect more dirty segments.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			wait_ms = increase_sleep_time(gc_th, wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			wait_ms = decrease_sleep_time(gc_th, wait_ms);
		else
			wait_ms = increase_sleep_time(gc_th, wait_ms);

		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi))
			wait_ms = gc_th->no_gc_sleep_time;

		/* balance f2fs' metadata periodically */
		f2fs_balance_fs_bg(sbi);

	} while (!kthread_should_stop());
	return 0;
}

int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	if (!test_opt(sbi, BG_GC))
		goto out;
	gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_idle = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
				"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

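/*
 * Pick the victim-selection cost model for this GC pass.  gc_idle is a
 * tunable (normally exposed through the gc_idle sysfs attribute):
 * 0 selects the default per gc_type (cost-benefit for background GC,
 * greedy for foreground GC), 1 forces cost-benefit, 2 forces greedy.
 */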
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (gc_th && gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	if (p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	p->offset = sbi->last_victim[p->gc_mode];
}

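/*
 * Worst possible cost for the chosen policy; get_victim_by_default()
 * uses it to seed the running minimum before scanning candidates.
 */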
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return 1 << sbi->log_blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int hint = 0;
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections that were
	 * already selected by background GC.
	 * Those sections are guaranteed to have only a few valid blocks.
	 */
next:
	secno = find_next_bit(dirty_i->victim_secmap, TOTAL_SECS(sbi), hint++);
	if (secno < TOTAL_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			goto next;
		clear_bit(secno, dirty_i->victim_secmap);
		return secno * sbi->segs_per_sec;
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

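	/*
	 * Classic LFS cost-benefit: with utilization u and age both scaled
	 * to [0, 100], the benefit of cleaning this section is roughly
	 * age * (100 - u) / (100 + u).  Since the victim selector looks for
	 * the *minimum* cost, return UINT_MAX minus that benefit so the most
	 * profitable section yields the smallest value.  For example (purely
	 * illustrative numbers): u = 20, age = 50 gives
	 * 100 * 80 * 50 / 120 ~= 3333, i.e. a cost of UINT_MAX - 3333.
	 */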
	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
	else
		return get_cb_cost(sbi, segno);
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just picks a victim segment
 * and does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds the segment
 * with the minimum number of valid blocks and removes it from the
 * dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, max_cost;
	int nsearched = 0;

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = max_cost = get_max_cost(sbi, &p);

	mutex_lock(&dirty_i->seglist_lock);

	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap,
						TOTAL_SEGS(sbi), p.offset);
		if (segno >= TOTAL_SEGS(sbi)) {
			if (sbi->last_victim[p.gc_mode]) {
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1)
			p.offset -= segno % p.ofs_unit;

		secno = GET_SECNO(sbi, segno);

		if (sec_usage_check(sbi, secno))
			continue;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			continue;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		} else if (unlikely(cost == max_cost)) {
			continue;
		}

		if (nsearched++ >= p.max_search) {
			sbi->last_victim[p.gc_mode] = segno;
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SECNO(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

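/*
 * The GC inode list (ilist) keeps the inodes found in earlier phases of
 * gc_data_segment() referenced, so their data pages can still be moved
 * in the final phase; put_gc_inode() drops the references afterwards.
 */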
static struct inode *find_gc_inode(nid_t ino, struct list_head *ilist)
{
	struct inode_entry *ie;

	list_for_each_entry(ie, ilist, list)
		if (ie->inode->i_ino == ino)
			return ie->inode;
	return NULL;
}

static void add_gc_inode(struct inode *inode, struct list_head *ilist)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(inode->i_ino, ilist)) {
		iput(inode);
		return;
	}

	new_ie = f2fs_kmem_cache_alloc(winode_slab, GFP_NOFS);
	new_ie->inode = inode;
	list_add_tail(&new_ie->list, ilist);
}

static void put_gc_inode(struct list_head *ilist)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, ilist, list) {
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(winode_slab, ie);
	}
}

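/*
 * Return whether block @offset of segment @segno is still valid
 * according to the current SIT validity bitmap.
 */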
static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with
 * the one in the NAT. If the node is still valid, it is migrated with
 * cold status; otherwise (an invalid node) it is ignored.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	bool initial = true;
	struct f2fs_summary *entry;
	int off;

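	/*
	 * Two passes over the segment: the first (initial == true) only
	 * issues readahead for the node pages of valid blocks; the second
	 * actually dirties them so they get written to a new location.
	 */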
next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (initial) {
			ra_node_page(sbi, nid);
			continue;
		}
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* set page dirty and write it */
		if (gc_type == FG_GC) {
			f2fs_wait_on_page_writeback(node_page, NODE);
			set_page_dirty(node_page);
		} else {
			if (!PageWriteback(node_page))
				set_page_dirty(node_page);
		}
		f2fs_put_page(node_page, 1);
		stat_inc_node_blk_count(sbi, 1);
	}

	if (initial) {
		initial = false;
		goto next_step;
	}

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = LONG_MAX,
			.for_reclaim = 0,
		};
		sync_node_pages(sbi, 0, &wbc);

		/*
		 * In the case of FG_GC, it'd be better to reclaim this victim
		 * completely.
		 */
		if (get_valid_blocks(sbi, segno, 1) != 0)
			goto next_step;
	}
}

/*
 * Calculate the start block index of the data area covered by the given
 * node offset. Be careful: callers must pass a node offset that refers
 * only to a direct node block. Passing an offset of any other node type,
 * such as an indirect or double indirect node block, is a caller bug.
 */
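/*
 * Roughly, the node offsets within an inode's node tree are laid out as:
 * 0 = the inode itself, 1..2 = the two direct nodes, 3 = the first
 * indirect node followed by its NIDS_PER_BLOCK direct children, then the
 * second indirect node and its children, then the double indirect node.
 * The "dec" term subtracts the indirect nodes seen so far, so e.g.
 * node_ofs == 4 (first direct node under the first indirect node) gives
 * bidx == 2, i.e. data starting at ADDRS_PER_INODE + 2 * ADDRS_PER_BLOCK.
 */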
block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
}

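/*
 * Check whether the data block at @blkaddr is still referenced by a live
 * dnode: look up the summary's nid, compare the summary version with the
 * NAT entry, and verify that the dnode still points at @blkaddr. Returns
 * 1 if the block is valid and fills in @dni and @nofs, 0 otherwise.
 */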
static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return 0;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return 0;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return 0;
	return 1;
}

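/*
 * Migrate one data page: for background GC just mark it dirty and cold so
 * the regular writeback path relocates it later; for foreground GC wait
 * for in-flight writeback and rewrite the page synchronously right away.
 */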
static void move_data_page(struct inode *inode, struct page *page, int gc_type)
{
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = WRITE_SYNC,
	};

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		f2fs_wait_on_page_writeback(page, DATA);

		if (clear_page_dirty_for_io(page))
			inode_dec_dirty_dents(inode);
		set_cold_data(page);
		do_write_data_page(page, &fio);
		clear_cold_data(page);
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks whether the data block is still valid. If it is, the block is
 * copied with cold status and the parent node is updated.
 * If the parent node is not valid or the data block address differs,
 * the victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct list_head *ilist, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

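	/*
	 * Four passes over the segment:
	 *   phase 0 - readahead the dnode pages of valid blocks,
	 *   phase 1 - validate each dnode and readahead the owning inode,
	 *   phase 2 - iget the inode, readahead its data page, and stash the
	 *             inode on @ilist,
	 *   phase 3 - lock each data page and actually move it.
	 */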
next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_node_page(sbi, le32_to_cpu(entry->nid));
			continue;
		}

		/* Get an inode by ino after checking validity */
		if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0)
			continue;

		if (phase == 1) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 2) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode))
				continue;

			start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));

			data_page = find_data_page(inode,
					start_bidx + ofs_in_node, false);
			if (IS_ERR(data_page))
				goto next_iput;

			f2fs_put_page(data_page, 0);
			add_gc_inode(inode, ilist);
		} else {
			inode = find_gc_inode(dni.ino, ilist);
			if (inode) {
				start_bidx = start_bidx_of_node(nofs,
								F2FS_I(inode));
				data_page = get_lock_data_page(inode,
						start_bidx + ofs_in_node);
				if (IS_ERR(data_page))
					continue;
				move_data_page(inode, data_page, gc_type);
				stat_inc_data_blk_count(sbi, 1);
			}
		}
		continue;
next_iput:
		iput(inode);
	}

	if (++phase < 4)
		goto next_step;

	if (gc_type == FG_GC) {
		f2fs_submit_merged_bio(sbi, DATA, WRITE);

		/*
		 * In the case of FG_GC, it'd be better to reclaim this victim
		 * completely.
		 */
		if (get_valid_blocks(sbi, segno, 1) != 0) {
			phase = 2;
			goto next_step;
		}
	}
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
						int gc_type, int type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;
	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, type, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
				struct list_head *ilist, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;

	/* read segment summary of victim */
	sum_page = get_sum_page(sbi, segno);

	blk_start_plug(&plug);

	sum = page_address(sum_page);

	switch (GET_SUM_TYPE((&sum->footer))) {
	case SUM_TYPE_NODE:
		gc_node_segment(sbi, sum->entries, segno, gc_type);
		break;
	case SUM_TYPE_DATA:
		gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
		break;
	}
	blk_finish_plug(&plug);

	stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
	stat_inc_call_count(sbi->stat_info);

	f2fs_put_page(sum_page, 1);
}

int f2fs_gc(struct f2fs_sb_info *sbi)
{
	struct list_head ilist;
	unsigned int segno, i;
	int gc_type = BG_GC;
	int nfree = 0;
	int ret = -1;

	INIT_LIST_HEAD(&ilist);
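	/*
	 * Repeat until there are enough free sections: GC starts in
	 * background mode and escalates to foreground mode (after writing
	 * a checkpoint) when free sections run short.
	 */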
gc_more:
	if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
		goto stop;
	if (unlikely(is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)))
		goto stop;

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
		gc_type = FG_GC;
		write_checkpoint(sbi, false);
	}

	if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
		goto stop;
	ret = 0;

	/* readahead multiple SSA blocks that have contiguous addresses */
	if (sbi->segs_per_sec > 1)
		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), sbi->segs_per_sec,
								META_SSA);

	for (i = 0; i < sbi->segs_per_sec; i++)
		do_garbage_collect(sbi, segno + i, &ilist, gc_type);

	if (gc_type == FG_GC) {
		sbi->cur_victim_sec = NULL_SEGNO;
		nfree++;
		WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec));
	}

	if (has_not_enough_free_secs(sbi, nfree))
		goto gc_more;

	if (gc_type == FG_GC)
		write_checkpoint(sbi, false);
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&ilist);
	return ret;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;
}

int __init create_gc_caches(void)
{
	winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes",
			sizeof(struct inode_entry));
	if (!winode_slab)
		return -ENOMEM;
	return 0;
}

void destroy_gc_caches(void)
{
	kmem_cache_destroy(winode_slab);
749}