/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/blkdev.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

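/*
 * Background GC thread: it sleeps for an adaptive interval and, when the
 * filesystem looks idle, takes gc_mutex and runs one round of f2fs_gc().
 * The sleep time shrinks while there are many invalid blocks to reclaim
 * and grows again when there is little cleaning work to do.
 */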
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = gc_th->min_sleep_time;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
						kthread_should_stop(),
						msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs frequently, because
		 * some segments can be invalidated soon after by user update
		 * or deletion. So, we'd like to wait some time to collect
		 * dirty segments.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);

		stat_inc_bggc_count(sbi);

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC)))
			wait_ms = gc_th->no_gc_sleep_time;

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);

	} while (!kthread_should_stop());
	return 0;
}

int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_idle = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

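/*
 * gc_idle (normally exposed as a sysfs tunable) overrides the default
 * victim policy: 0 keeps the default (cost-benefit for background GC,
 * greedy otherwise), 1 forces cost-benefit, and 2 forces greedy.
 */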
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (gc_th && gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	return gc_mode;
}

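/*
 * Set up the victim search: SSR scans the dirty bitmap of the requested
 * segment type one segment at a time with the greedy policy, while LFS
 * cleaning scans all dirty segments a section at a time using the mode
 * chosen by select_gc_type().
 */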
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	if (p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	p->offset = sbi->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return 1 << sbi->log_blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select victim segments that
	 * were already selected by background GC before; those segments
	 * are guaranteed to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return secno * sbi->segs_per_sec;
	}
	return NULL_SEGNO;
}

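/*
 * Cost-benefit cost of a section: with u = valid-block utilization in
 * percent and age = normalized section age (older is larger), the benefit
 * is roughly age * (100 - u) / (100 + u).  The value is returned as
 * UINT_MAX minus that benefit, so the cheapest candidate in
 * get_victim_by_default() is the most profitable one to clean.
 */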
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
	else
		return get_cb_cost(sbi, segno);
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, max_cost;
	unsigned int last_segment = MAIN_SEGS(sbi);
	int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = max_cost = get_max_cost(sbi, &p);

	if (p.max_search == 0)
		goto out;

	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sbi->last_victim[p.gc_mode]) {
				last_segment = sbi->last_victim[p.gc_mode];
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1)
			p.offset -= segno % p.ofs_unit;

		secno = GET_SECNO(sbi, segno);

		if (sec_usage_check(sbi, secno))
			continue;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			continue;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		} else if (unlikely(cost == max_cost)) {
			continue;
		}

		if (nsearched++ >= p.max_search) {
			sbi->last_victim[p.gc_mode] = segno;
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SECNO(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
out:
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares node address got in summary with that in NAT.
 * On validity, copy that node with cold status, otherwise (invalid node)
 * ignore that.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	bool initial = true;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return 0;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (initial) {
			ra_node_page(sbi, nid);
			continue;
		}
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		get_node_info(sbi, nid, &ni);
		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		/* set page dirty and write it */
		if (gc_type == FG_GC) {
			f2fs_wait_on_page_writeback(node_page, NODE);
			set_page_dirty(node_page);
		} else {
			if (!PageWriteback(node_page))
				set_page_dirty(node_page);
		}
		f2fs_put_page(node_page, 1);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (initial) {
		initial = false;
		goto next_step;
	}

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = LONG_MAX,
			.for_reclaim = 0,
		};
		sync_node_pages(sbi, 0, &wbc);

		/* return 1 only if FG_GC successfully reclaimed one */
		if (get_valid_blocks(sbi, segno, 1) == 0)
			return 1;
	}
	return 0;
}

/*
 * Calculate start block index indicating the given node offset.
 * Be careful, caller should give this node offset only indicating direct node
 * blocks. If any node offsets, which point the other types of node blocks such
 * as indirect or double indirect node blocks, are given, it must be a caller's
 * bug.
 */
block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
}

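/*
 * A data block is considered alive only if the summary's node version
 * matches the current NAT version and the owning node still points at
 * this block address; otherwise the block was overwritten or truncated
 * and GC can skip it.
 */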
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}

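/*
 * Encrypted data pages cannot be rewritten through the normal data path
 * here, so GC moves the raw ciphertext instead: read the block into a
 * page of the meta mapping, allocate a new cold-data block address, and
 * write the ciphertext back without decrypting it.
 */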
static void move_encrypted_block(struct inode *inode, block_t bidx)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = READ_SYNC,
		.encrypted_page = NULL,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page;
	int err;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR))
		goto put_out;

	get_node_info(fio.sbi, dn.nid, &ni);
	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.blk_addr = dn.data_blkaddr;

	fio.encrypted_page = grab_cache_page(META_MAPPING(fio.sbi), fio.blk_addr);
	if (!fio.encrypted_page)
		goto put_out;

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);

	if (unlikely(!PageUptodate(fio.encrypted_page)))
		goto put_page_out;
	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi)))
		goto put_page_out;

	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, META);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE);
	allocate_data_block(fio.sbi, NULL, fio.blk_addr,
					&fio.blk_addr, &sum, CURSEG_COLD_DATA);
	fio.rw = WRITE_SYNC;
	f2fs_submit_page_mbio(&fio);

	dn.data_blkaddr = fio.blk_addr;
	set_data_blkaddr(&dn);
	f2fs_update_extent_cache(&dn);
	set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
}

static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
{
	struct page *page;

	page = get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return;

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.type = DATA,
			.rw = WRITE_SYNC,
			.page = page,
			.encrypted_page = NULL,
		};
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA);
		if (clear_page_dirty_for_io(page))
			inode_dec_dirty_pages(inode);
		set_cold_data(page);
		do_write_data_page(&fio);
		clear_cold_data(page);
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get parent node of victim data block, and identifies
 * data block validity. If the block is valid, copy that with cold status and
 * modify parent node.
 * If the parent node is not valid or the data block address is different,
 * the victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return 0;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_node_page(sbi, le32_to_cpu(entry->nid));
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 1) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 2) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			/* if encrypted inode, let's go phase 3 */
			if (f2fs_encrypted_inode(inode) &&
						S_ISREG(inode->i_mode)) {
				add_gc_inode(gc_list, inode);
				continue;
			}

			start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
			data_page = get_read_data_page(inode,
					start_bidx + ofs_in_node, READA, true);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 3 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			start_bidx = start_bidx_of_node(nofs, F2FS_I(inode))
								+ ofs_in_node;
			if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
				move_encrypted_block(inode, start_bidx);
			else
				move_data_page(inode, start_bidx, gc_type);
			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 4)
		goto next_step;

	if (gc_type == FG_GC) {
		f2fs_submit_merged_bio(sbi, DATA, WRITE);

		/* return 1 only if FG_GC successfully reclaimed one */
		if (get_valid_blocks(sbi, segno, 1) == 0)
			return 1;
	}
	return 0;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

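/*
 * Collect one victim segment: read its summary block and migrate either
 * node or data blocks according to the summary footer type.  Returns
 * nonzero only if FG_GC actually freed the whole segment.
 */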
static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
			struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	int nfree = 0;

	/* read segment summary of victim */
	sum_page = get_sum_page(sbi, segno);

	blk_start_plug(&plug);

	sum = page_address(sum_page);

	/*
	 * this is to avoid deadlock:
	 * - lock_page(sum_page)         - f2fs_replace_block
	 *  - check_valid_map()            - mutex_lock(sentry_lock)
	 *   - mutex_lock(sentry_lock)     - change_curseg()
	 *                                  - lock_page(sum_page)
	 */
	unlock_page(sum_page);

	switch (GET_SUM_TYPE((&sum->footer))) {
	case SUM_TYPE_NODE:
		nfree = gc_node_segment(sbi, sum->entries, segno, gc_type);
		break;
	case SUM_TYPE_DATA:
		nfree = gc_data_segment(sbi, sum->entries, gc_list,
							segno, gc_type);
		break;
	}
	blk_finish_plug(&plug);

	stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)), gc_type);
	stat_inc_call_count(sbi->stat_info);

	f2fs_put_page(sum_page, 0);
	return nfree;
}

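/*
 * Entry point for both background and foreground GC.  The caller holds
 * gc_mutex, which is released here before returning.  With @sync set,
 * GC runs in foreground mode and returns -EAGAIN if no section could be
 * freed.
 */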
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
{
	unsigned int segno, i;
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0;
	int ret = -EINVAL;
	struct cp_control cpc;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(GFP_NOFS),
	};

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	segno = NULL_SEGNO;

	if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
		goto stop;
	if (unlikely(f2fs_cp_error(sbi)))
		goto stop;

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed)) {
		gc_type = FG_GC;
		if (__get_victim(sbi, &segno, gc_type) || prefree_segments(sbi))
			write_checkpoint(sbi, &cpc);
	}

	if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
		goto stop;
	ret = 0;

	/* readahead multiple SSA blocks that have contiguous addresses */
	if (sbi->segs_per_sec > 1)
		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), sbi->segs_per_sec,
								META_SSA);

	for (i = 0; i < sbi->segs_per_sec; i++) {
		/*
		 * for FG_GC, stop collecting the remaining segments in the
		 * selected section once one of them fails, to avoid long
		 * latency.
		 */
		if (!do_garbage_collect(sbi, segno + i, &gc_list, gc_type) &&
				gc_type == FG_GC)
			break;
	}

	if (i == sbi->segs_per_sec && gc_type == FG_GC)
		sec_freed++;

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (!sync) {
		if (has_not_enough_free_secs(sbi, sec_freed))
			goto gc_more;

		if (gc_type == FG_GC)
			write_checkpoint(sbi, &cpc);
	}
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);

	if (sync)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;
}