/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/blkdev.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"

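/* slab cache for the inode entries collected while moving data pages */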
static struct kmem_cache *winode_slab;

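/*
 * The background GC thread: it wakes up periodically and, if the
 * filesystem is idle and gc_mutex is uncontended, performs one round
 * of f2fs_gc(). The sleep interval adapts to how many invalid blocks
 * have accumulated.
 */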
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = GC_THREAD_MIN_SLEEP_TIME;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
						kthread_should_stop(),
						msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			wait_ms = GC_THREAD_MAX_SLEEP_TIME;
			continue;
		}

		/*
		 * [GC triggering conditions]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. The IO subsystem is idle, judged by the number of
		 *    writeback pages.
		 * 3. The IO subsystem is idle, judged by the number of
		 *    requests in the bdev's request list.
		 *
		 * Note) We have to avoid triggering GC too frequently,
		 * because some segments may be invalidated soon afterwards
		 * by user updates or deletions, so it is better to wait a
		 * while for dirty segments to accumulate.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			wait_ms = increase_sleep_time(wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			wait_ms = decrease_sleep_time(wait_ms);
		else
			wait_ms = increase_sleep_time(wait_ms);

		sbi->bg_gc++;

		if (f2fs_gc(sbi) == GC_NONE)
			wait_ms = GC_THREAD_NOGC_SLEEP_TIME;
		else if (wait_ms == GC_THREAD_NOGC_SLEEP_TIME)
			wait_ms = GC_THREAD_MAX_SLEEP_TIME;

	} while (!kthread_should_stop());
	return 0;
}

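/* Create and start the background GC thread, if background GC is enabled */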
90int start_gc_thread(struct f2fs_sb_info *sbi)
91{
Namjae Jeon1042d602012-12-01 10:56:13 +090092 struct f2fs_gc_kthread *gc_th;
Jaegeuk Kim7bc09002012-11-02 17:13:01 +090093
Changman Lee48600e42013-02-04 10:05:09 +090094 if (!test_opt(sbi, BG_GC))
95 return 0;
Jaegeuk Kim7bc09002012-11-02 17:13:01 +090096 gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
97 if (!gc_th)
98 return -ENOMEM;
99
100 sbi->gc_thread = gc_th;
101 init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
102 sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
103 GC_THREAD_NAME);
104 if (IS_ERR(gc_th->f2fs_gc_task)) {
105 kfree(gc_th);
106 return -ENOMEM;
107 }
108 return 0;
109}
110
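/* Stop the background GC thread and free its control structure */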
void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

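/*
 * Background GC uses cost-benefit victim selection (favoring old,
 * mostly-invalid sections), while foreground GC selects greedily,
 * i.e. the section with the fewest valid blocks.
 */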
static int select_gc_type(int gc_type)
{
	return (gc_type == BG_GC) ? GC_CB : GC_GREEDY;
}

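/*
 * Set up the victim selection policy: SSR allocation scans the per-type
 * dirty segment map greedily one segment at a time, whereas LFS cleaning
 * scans all dirty segments one section at a time using the gc_type's
 * policy. Scanning resumes from the last victim of the chosen mode.
 */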
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}
	p->offset = sbi->last_victim[p->gc_mode];
}

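/*
 * The worst possible cost, used as the initial minimum during victim
 * search: for greedy mode, every block in the section is valid; for
 * cost-benefit mode, the cost is encoded as UINT_MAX minus the benefit.
 */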
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	if (p->gc_mode == GC_GREEDY)
		return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim segments that were
	 * selected by background GC before. Those segments are guaranteed
	 * to have few valid blocks.
	 */
	segno = find_next_bit(dirty_i->victim_segmap[BG_GC],
						TOTAL_SEGS(sbi), 0);
	if (segno < TOTAL_SEGS(sbi)) {
		clear_bit(segno, dirty_i->victim_segmap[BG_GC]);
		return segno;
	}
	return NULL_SEGNO;
}

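/*
 * Cost-benefit victim selection, following the classic cleaning
 * heuristic of log-structured file systems:
 * benefit is proportional to age * (100 - u) / (100 + u), where u is
 * the section utilization in percent and age is the section's
 * modification time normalized to 0..100. The value is returned as
 * UINT_MAX - benefit so that the min-cost search in
 * get_victim_by_default() picks the section with the greatest benefit.
 */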
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

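/*
 * Cost of cleaning one candidate: for SSR it is the number of blocks
 * valid as of the last checkpoint; for LFS it is either the raw valid
 * block count (greedy) or the cost-benefit value above.
 */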
static unsigned int get_gc_cost(struct f2fs_sb_info *sbi, unsigned int segno,
					struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
	else
		return get_cb_cost(sbi, segno);
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has the minimum number of valid blocks and removes it from
 * the dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int segno;
	int nsearched = 0;

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	mutex_lock(&dirty_i->seglist_lock);

	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;

		segno = find_next_bit(p.dirty_segmap,
						TOTAL_SEGS(sbi), p.offset);
		if (segno >= TOTAL_SEGS(sbi)) {
			if (sbi->last_victim[p.gc_mode]) {
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}
		p.offset = ((segno / p.ofs_unit) * p.ofs_unit) + p.ofs_unit;

		if (test_bit(segno, dirty_i->victim_segmap[FG_GC]))
			continue;
		if (gc_type == BG_GC &&
			test_bit(segno, dirty_i->victim_segmap[BG_GC]))
			continue;
		if (IS_CURSEC(sbi, GET_SECNO(sbi, segno)))
			continue;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}

		if (cost == get_max_cost(sbi, &p))
			continue;

		if (nsearched++ >= MAX_VICTIM_SEARCH) {
			sbi->last_victim[p.gc_mode] = segno;
			break;
		}
	}
got_it:
	if (p.min_segno != NULL_SEGNO) {
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
		if (p.alloc_mode == LFS) {
			int i;
			for (i = 0; i < p.ofs_unit; i++)
				set_bit(*result + i,
					dirty_i->victim_segmap[gc_type]);
		}
	}
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

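/* Look up an inode already collected on the GC inode list by its ino */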
static struct inode *find_gc_inode(nid_t ino, struct list_head *ilist)
{
	struct list_head *this;
	struct inode_entry *ie;

	list_for_each(this, ilist) {
		ie = list_entry(this, struct inode_entry, list);
		if (ie->inode->i_ino == ino)
			return ie->inode;
	}
	return NULL;
}

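/*
 * Add an inode to the GC inode list. The list takes over the caller's
 * reference; if the inode is already on the list, the duplicate
 * reference is dropped immediately.
 */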
static void add_gc_inode(struct inode *inode, struct list_head *ilist)
{
	struct list_head *this;
	struct inode_entry *new_ie, *ie;

	list_for_each(this, ilist) {
		ie = list_entry(this, struct inode_entry, list);
		if (ie->inode == inode) {
			iput(inode);
			return;
		}
	}
repeat:
	new_ie = kmem_cache_alloc(winode_slab, GFP_NOFS);
	if (!new_ie) {
		cond_resched();
		goto repeat;
	}
	new_ie->inode = inode;
	list_add_tail(&new_ie->list, ilist);
}

static void put_gc_inode(struct list_head *ilist)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, ilist, list) {
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(winode_slab, ie);
	}
}

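/*
 * Test the SIT bitmap under the sentry lock: returns GC_OK if the block
 * at the given segment offset is still valid, GC_NEXT otherwise.
 */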
static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret ? GC_OK : GC_NEXT;
}

/*
 * This function compares the node address recorded in the summary with
 * the one in the NAT. If the node is valid, it is copied with cold
 * status; otherwise (an invalid node) it is ignored.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	bool initial = true;
	struct f2fs_summary *entry;
	int off;

next_step:
	entry = sum;
	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		int err;

		/*
		 * Make sure that enough free segments remain to write all
		 * the dirty node pages before the next CP, i.e. check the
		 * space needed for dirty node pages.
		 */
		if (should_do_checkpoint(sbi)) {
			mutex_lock(&sbi->cp_mutex);
			block_operations(sbi);
			return GC_BLOCKED;
		}

		err = check_valid_map(sbi, segno, off);
		if (err == GC_NEXT)
			continue;

		if (initial) {
			ra_node_page(sbi, nid);
			continue;
		}
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* set page dirty and write it */
		if (!PageWriteback(node_page))
			set_page_dirty(node_page);
		f2fs_put_page(node_page, 1);
		stat_inc_node_blk_count(sbi, 1);
	}
	if (initial) {
		initial = false;
		goto next_step;
	}

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = LONG_MAX,
			.for_reclaim = 0,
		};
		sync_node_pages(sbi, 0, &wbc);
	}
	return GC_DONE;
}

/*
 * Calculate the start block index that the given node offset points to.
 * Be careful: callers must pass node offsets that indicate direct node
 * blocks only. Passing a node offset that points to any other node block
 * type, such as an indirect or double indirect node block, is a caller
 * bug.
 */
block_t start_bidx_of_node(unsigned int node_ofs)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE;
}

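/*
 * Validate a data block against its owning dnode: look up the node page,
 * compare the summary version with the NAT version, and make sure the
 * dnode still points at this block address. Returns GC_OK when the block
 * is live, GC_NEXT when it should be skipped, and fills in the node info
 * and node offset for the caller.
 */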
static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return GC_NEXT;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return GC_NEXT;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return GC_NEXT;
	return GC_OK;
}

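/*
 * Move one valid data page: background GC only marks it dirty and cold
 * so that regular writeback migrates it, while foreground GC writes it
 * out immediately under the DATA_WRITE lock.
 */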
static void move_data_page(struct inode *inode, struct page *page, int gc_type)
{
	if (page->mapping != inode->i_mapping)
		goto out;

	if (inode != page->mapping->host)
		goto out;

	if (PageWriteback(page))
		goto out;

	if (gc_type == BG_GC) {
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
		mutex_lock_op(sbi, DATA_WRITE);
		if (clear_page_dirty_for_io(page) &&
			S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		set_cold_data(page);
		do_write_data_page(page);
		mutex_unlock_op(sbi, DATA_WRITE);
		clear_cold_data(page);
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks the block's validity. If the block is valid, it is copied with
 * cold status and the parent node is modified.
 * If the parent node is not valid or the data block address is different,
 * the victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct list_head *ilist, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int err, off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;
	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;

		/*
		 * Make sure that enough free segments remain to write all
		 * the dirty node pages before the next CP, i.e. check the
		 * space needed for dirty node pages.
		 */
		if (should_do_checkpoint(sbi)) {
			mutex_lock(&sbi->cp_mutex);
			block_operations(sbi);
			err = GC_BLOCKED;
			goto stop;
		}

		err = check_valid_map(sbi, segno, off);
		if (err == GC_NEXT)
			continue;

		if (phase == 0) {
			ra_node_page(sbi, le32_to_cpu(entry->nid));
			continue;
		}

		/* Get an inode by ino with checking validity */
		err = check_dnode(sbi, entry, &dni, start_addr + off, &nofs);
		if (err == GC_NEXT)
			continue;

		if (phase == 1) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		start_bidx = start_bidx_of_node(nofs);
		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 2) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode))
				continue;

			data_page = find_data_page(inode,
					start_bidx + ofs_in_node);
			if (IS_ERR(data_page))
				goto next_iput;

			f2fs_put_page(data_page, 0);
			add_gc_inode(inode, ilist);
		} else {
			inode = find_gc_inode(dni.ino, ilist);
			if (inode) {
				data_page = get_lock_data_page(inode,
						start_bidx + ofs_in_node);
				if (IS_ERR(data_page))
					continue;
				move_data_page(inode, data_page, gc_type);
				stat_inc_data_blk_count(sbi, 1);
			}
		}
		continue;
next_iput:
		iput(inode);
	}
	if (++phase < 4)
		goto next_step;
	err = GC_DONE;
stop:
	if (gc_type == FG_GC)
		f2fs_submit_bio(sbi, DATA, true);
	return err;
}

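/* Select a victim section for cleaning, under the SIT sentry lock */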
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
						int gc_type, int type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;
	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, type, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

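/*
 * Clean one victim segment: read its summary block and migrate either
 * its node blocks or its data blocks according to the summary type.
 */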
static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
				struct list_head *ilist, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	int ret = GC_DONE;

	/* read segment summary of victim */
	sum_page = get_sum_page(sbi, segno);
	if (IS_ERR(sum_page))
		return GC_ERROR;

	/*
	 * CP needs to lock sum_page. At this point, we don't need to lock
	 * this page, because the summary page is not going anywhere.
	 * Also, this page will not be updated before GC is done.
	 */
	unlock_page(sum_page);
	sum = page_address(sum_page);

	switch (GET_SUM_TYPE((&sum->footer))) {
	case SUM_TYPE_NODE:
		ret = gc_node_segment(sbi, sum->entries, segno, gc_type);
		break;
	case SUM_TYPE_DATA:
		ret = gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
		break;
	}
	stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
	stat_inc_call_count(sbi->stat_info);

	f2fs_put_page(sum_page, 0);
	return ret;
}

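/*
 * The GC entry point. The caller holds sbi->gc_mutex, which is released
 * here before returning. Background GC is escalated to foreground GC
 * when free sections run short, and cleaning repeats until enough free
 * sections are available.
 */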
int f2fs_gc(struct f2fs_sb_info *sbi)
{
	struct list_head ilist;
	unsigned int segno, i;
	int gc_type = BG_GC;
	int gc_status = GC_NONE;

	INIT_LIST_HEAD(&ilist);
gc_more:
	if (!(sbi->sb->s_flags & MS_ACTIVE))
		goto stop;

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi))
		gc_type = FG_GC;

	if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
		goto stop;

	for (i = 0; i < sbi->segs_per_sec; i++) {
		/*
		 * do_garbage_collect() returns one of three gc_status
		 * values: GC_ERROR, GC_DONE, or GC_BLOCKED.
		 * If GC finishes uncleanly, we have to return the victim
		 * to the dirty segment list.
		 */
		gc_status = do_garbage_collect(sbi, segno + i, &ilist, gc_type);
		if (gc_status != GC_DONE)
			break;
	}
	if (has_not_enough_free_secs(sbi)) {
		write_checkpoint(sbi, (gc_status == GC_BLOCKED), false);
		if (has_not_enough_free_secs(sbi))
			goto gc_more;
	}
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&ilist);
	return gc_status;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;
}

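/* Create the slab cache backing the GC inode list entries */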
int __init create_gc_caches(void)
{
	winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes",
			sizeof(struct inode_entry), NULL);
	if (!winode_slab)
		return -ENOMEM;
	return 0;
}

void destroy_gc_caches(void)
{
	kmem_cache_destroy(winode_slab);
}