/*
 * fs/f2fs/checkpoint.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *orphan_entry_slab;
static struct kmem_cache *inode_entry_slab;

/*
 * We guarantee no failure on the returned page.
 */
struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page = NULL;
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		cond_resched();
		goto repeat;
	}
	f2fs_wait_on_page_writeback(page, META);
	SetPageUptodate(page);
	return page;
}

/*
 * We guarantee no failure on the returned page.
 */
struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page;
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		cond_resched();
		goto repeat;
	}
	if (PageUptodate(page))
		goto out;

	if (f2fs_submit_page_bio(sbi, page, index,
				READ_SYNC | REQ_META | REQ_PRIO))
		goto repeat;

	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
out:
	mark_page_accessed(page);
	return page;
}

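/*
 * Upper bound (in blocks) of each meta area, used by ra_meta_pages():
 * NAT readahead wraps around at this bound and SIT readahead stops at
 * it, while SSA/CP return 0 since their block numbers are used as raw
 * block addresses.
 */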
static inline int get_max_meta_blks(struct f2fs_sb_info *sbi, int type)
{
	switch (type) {
	case META_NAT:
		return NM_I(sbi)->max_nid / NAT_ENTRY_PER_BLOCK;
	case META_SIT:
		return SIT_BLK_CNT(sbi);
	case META_SSA:
	case META_CP:
		return 0;
	default:
		BUG();
	}
}

/*
 * Readahead CP/NAT/SIT/SSA pages
 */
int ra_meta_pages(struct f2fs_sb_info *sbi, int start, int nrpages, int type)
{
	block_t prev_blk_addr = 0;
	struct page *page;
	int blkno = start;
	int max_blks = get_max_meta_blks(sbi, type);

	struct f2fs_io_info fio = {
		.type = META,
		.rw = READ_SYNC | REQ_META | REQ_PRIO
	};

	for (; nrpages-- > 0; blkno++) {
		block_t blk_addr;

		switch (type) {
		case META_NAT:
			/* get nat block addr */
			if (unlikely(blkno >= max_blks))
				blkno = 0;
			blk_addr = current_nat_addr(sbi,
					blkno * NAT_ENTRY_PER_BLOCK);
			break;
		case META_SIT:
			/* get sit block addr */
			if (unlikely(blkno >= max_blks))
				goto out;
			blk_addr = current_sit_addr(sbi,
					blkno * SIT_ENTRY_PER_BLOCK);
			if (blkno != start && prev_blk_addr + 1 != blk_addr)
				goto out;
			prev_blk_addr = blk_addr;
			break;
		case META_SSA:
		case META_CP:
			/* get ssa/cp block addr */
			blk_addr = blkno;
			break;
		default:
			BUG();
		}

		page = grab_cache_page(META_MAPPING(sbi), blk_addr);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			mark_page_accessed(page);
			f2fs_put_page(page, 1);
			continue;
		}

		f2fs_submit_page_mbio(sbi, page, blk_addr, &fio);
		mark_page_accessed(page);
		f2fs_put_page(page, 0);
	}
out:
	f2fs_submit_merged_bio(sbi, META, READ);
	return blkno - start;
}

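/*
 * Write out a single dirty meta page.  The page is redirtied instead of
 * written while recovery is in progress (por_doing) or when invoked from
 * reclaim, and is dropped without being written once the checkpoint
 * carries CP_ERROR_FLAG.
 */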
static int f2fs_write_meta_page(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	trace_f2fs_writepage(page, META);

	if (unlikely(sbi->por_doing))
		goto redirty_out;
	if (wbc->for_reclaim)
		goto redirty_out;

	/* should not write any meta pages if an IO error occurred */
	if (unlikely(is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)))
		goto no_write;

	f2fs_wait_on_page_writeback(page, META);
	write_meta_page(sbi, page);
no_write:
	dec_page_count(sbi, F2FS_DIRTY_META);
	unlock_page(page);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_meta_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	long diff, written;

	trace_f2fs_writepages(mapping->host, wbc, META);

	/* collect a number of dirty meta pages and write together */
	if (wbc->for_kupdate ||
		get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META))
		goto skip_write;

	/* if mounting failed, skip writing node pages */
	mutex_lock(&sbi->cp_mutex);
	diff = nr_pages_to_write(sbi, META, wbc);
	written = sync_meta_pages(sbi, META, wbc->nr_to_write);
	mutex_unlock(&sbi->cp_mutex);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
	return 0;
}

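/*
 * Walk the meta mapping with a pagevec over PAGECACHE_TAG_DIRTY and
 * write up to nr_to_write pages, then submit the merged WRITE bio so
 * nothing is left sitting in the per-type bio cache.
 */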
long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
						long nr_to_write)
{
	struct address_space *mapping = META_MAPPING(sbi);
	pgoff_t index = 0, end = LONG_MAX;
	struct pagevec pvec;
	long nwritten = 0;
	struct writeback_control wbc = {
		.for_reclaim = 0,
	};

	pagevec_init(&pvec, 0);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (unlikely(nr_pages == 0))
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			if (f2fs_write_meta_page(page, &wbc)) {
				unlock_page(page);
				break;
			}
			nwritten++;
			if (unlikely(nwritten >= nr_to_write))
				break;
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (nwritten)
		f2fs_submit_merged_bio(sbi, type, WRITE);

	return nwritten;
}

static int f2fs_set_meta_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	trace_f2fs_set_page_dirty(page, META);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_META);
		return 1;
	}
	return 0;
}

const struct address_space_operations f2fs_meta_aops = {
	.writepage	= f2fs_write_meta_page,
	.writepages	= f2fs_write_meta_pages,
	.set_page_dirty	= f2fs_set_meta_page_dirty,
};

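/*
 * Orphan bookkeeping: acquire_orphan_inode() reserves a slot against
 * max_orphans under orphan_inode_lock, release_orphan_inode() gives the
 * slot back, and add/remove maintain the ino-sorted orphan_inode_list
 * that gets serialized into the CP pack at checkpoint time.
 */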
int acquire_orphan_inode(struct f2fs_sb_info *sbi)
{
	int err = 0;

	spin_lock(&sbi->orphan_inode_lock);
	if (unlikely(sbi->n_orphans >= sbi->max_orphans))
		err = -ENOSPC;
	else
		sbi->n_orphans++;
	spin_unlock(&sbi->orphan_inode_lock);

	return err;
}

void release_orphan_inode(struct f2fs_sb_info *sbi)
{
	spin_lock(&sbi->orphan_inode_lock);
	f2fs_bug_on(sbi->n_orphans == 0);
	sbi->n_orphans--;
	spin_unlock(&sbi->orphan_inode_lock);
}

void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct list_head *head;
	struct orphan_inode_entry *new, *orphan;

	new = f2fs_kmem_cache_alloc(orphan_entry_slab, GFP_ATOMIC);
	new->ino = ino;

	spin_lock(&sbi->orphan_inode_lock);
	head = &sbi->orphan_inode_list;
	list_for_each_entry(orphan, head, list) {
		if (orphan->ino == ino) {
			spin_unlock(&sbi->orphan_inode_lock);
			kmem_cache_free(orphan_entry_slab, new);
			return;
		}

		if (orphan->ino > ino)
			break;
	}

	/* add new orphan entry into list which is sorted by inode number */
	list_add_tail(&new->list, &orphan->list);
	spin_unlock(&sbi->orphan_inode_lock);
}

void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct list_head *head;
	struct orphan_inode_entry *orphan;

	spin_lock(&sbi->orphan_inode_lock);
	head = &sbi->orphan_inode_list;
	list_for_each_entry(orphan, head, list) {
		if (orphan->ino == ino) {
			list_del(&orphan->list);
			f2fs_bug_on(sbi->n_orphans == 0);
			sbi->n_orphans--;
			spin_unlock(&sbi->orphan_inode_lock);
			kmem_cache_free(orphan_entry_slab, orphan);
			return;
		}
	}
	spin_unlock(&sbi->orphan_inode_lock);
}

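/*
 * Reclaim one orphan: dropping the link count to zero makes the final
 * iput() below evict the inode and truncate all of its data blocks.
 */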
static void recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode = f2fs_iget(sbi->sb, ino);
	f2fs_bug_on(IS_ERR(inode));
	clear_nlink(inode);

	/* truncate all the data during iput */
	iput(inode);
}

void recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
	block_t start_blk, orphan_blkaddr, i, j;

	if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
		return;

	sbi->por_doing = true;

	start_blk = __start_cp_addr(sbi) + 1 +
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
	orphan_blkaddr = __start_sum_addr(sbi) - 1;

	ra_meta_pages(sbi, start_blk, orphan_blkaddr, META_CP);

	for (i = 0; i < orphan_blkaddr; i++) {
		struct page *page = get_meta_page(sbi, start_blk + i);
		struct f2fs_orphan_block *orphan_blk;

		orphan_blk = (struct f2fs_orphan_block *)page_address(page);
		for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
			nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
			recover_orphan_inode(sbi, ino);
		}
		f2fs_put_page(page, 1);
	}
	/* clear Orphan Flag */
	clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
	sbi->por_doing = false;
}

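/*
 * Serialize the in-memory orphan list into on-disk orphan blocks
 * starting at start_blk.  All needed meta pages are grabbed up front so
 * that writing cannot fail midway through the list.
 */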
static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
{
	struct list_head *head;
	struct f2fs_orphan_block *orphan_blk = NULL;
	unsigned int nentries = 0;
	unsigned short index;
	unsigned short orphan_blocks = (unsigned short)((sbi->n_orphans +
		(F2FS_ORPHANS_PER_BLOCK - 1)) / F2FS_ORPHANS_PER_BLOCK);
	struct page *page = NULL;
	struct orphan_inode_entry *orphan = NULL;

	for (index = 0; index < orphan_blocks; index++)
		grab_meta_page(sbi, start_blk + index);

	index = 1;
	spin_lock(&sbi->orphan_inode_lock);
	head = &sbi->orphan_inode_list;

	/* loop for each orphan inode entry and write them in journal block */
	list_for_each_entry(orphan, head, list) {
		if (!page) {
			page = find_get_page(META_MAPPING(sbi), start_blk++);
			f2fs_bug_on(!page);
			orphan_blk =
				(struct f2fs_orphan_block *)page_address(page);
			memset(orphan_blk, 0, sizeof(*orphan_blk));
			f2fs_put_page(page, 0);
		}

		orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);

		if (nentries == F2FS_ORPHANS_PER_BLOCK) {
			/*
			 * an orphan block is full of 1020 entries,
			 * so we need to flush the current orphan block
			 * and bring another one in memory
			 */
			orphan_blk->blk_addr = cpu_to_le16(index);
			orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
			orphan_blk->entry_count = cpu_to_le32(nentries);
			set_page_dirty(page);
			f2fs_put_page(page, 1);
			index++;
			nentries = 0;
			page = NULL;
		}
	}

	if (page) {
		orphan_blk->blk_addr = cpu_to_le16(index);
		orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
		orphan_blk->entry_count = cpu_to_le32(nentries);
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}

	spin_unlock(&sbi->orphan_inode_lock);
}

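/*
 * A CP pack stores a copy of the checkpoint header in both its first
 * and last blocks.  The pack is valid only if both copies pass their
 * CRC checks and carry the same version number, which proves the pack
 * was written out completely.
 */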
static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
				block_t cp_addr, unsigned long long *version)
{
	struct page *cp_page_1, *cp_page_2 = NULL;
	unsigned long blk_size = sbi->blocksize;
	struct f2fs_checkpoint *cp_block;
	unsigned long long cur_version = 0, pre_version = 0;
	size_t crc_offset;
	__u32 crc = 0;

	/* Read the 1st cp block in this CP pack */
	cp_page_1 = get_meta_page(sbi, cp_addr);

	/* get the version number */
	cp_block = (struct f2fs_checkpoint *)page_address(cp_page_1);
	crc_offset = le32_to_cpu(cp_block->checksum_offset);
	if (crc_offset >= blk_size)
		goto invalid_cp1;

	crc = le32_to_cpu(*((__u32 *)((unsigned char *)cp_block + crc_offset)));
	if (!f2fs_crc_valid(crc, cp_block, crc_offset))
		goto invalid_cp1;

	pre_version = cur_cp_version(cp_block);

	/* Read the 2nd cp block in this CP pack */
	cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
	cp_page_2 = get_meta_page(sbi, cp_addr);

	cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2);
	crc_offset = le32_to_cpu(cp_block->checksum_offset);
	if (crc_offset >= blk_size)
		goto invalid_cp2;

	crc = le32_to_cpu(*((__u32 *)((unsigned char *)cp_block + crc_offset)));
	if (!f2fs_crc_valid(crc, cp_block, crc_offset))
		goto invalid_cp2;

	cur_version = cur_cp_version(cp_block);

	if (cur_version == pre_version) {
		*version = cur_version;
		f2fs_put_page(cp_page_2, 1);
		return cp_page_1;
	}
invalid_cp2:
	f2fs_put_page(cp_page_2, 1);
invalid_cp1:
	f2fs_put_page(cp_page_1, 1);
	return NULL;
}

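/*
 * Validate both CP packs and keep the header of the one with the newer
 * version (compared via ver_after) in sbi->ckpt; any extra cp_payload
 * blocks, which the code below treats as SIT bitmap data, are copied in
 * right behind the header.
 */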
int get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *cp_block;
	struct f2fs_super_block *fsb = sbi->raw_super;
	struct page *cp1, *cp2, *cur_page;
	unsigned long blk_size = sbi->blocksize;
	unsigned long long cp1_version = 0, cp2_version = 0;
	unsigned long long cp_start_blk_no;
	unsigned int cp_blks = 1 + le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
	block_t cp_blk_no;
	int i;

	sbi->ckpt = kzalloc(cp_blks * blk_size, GFP_KERNEL);
	if (!sbi->ckpt)
		return -ENOMEM;
	/*
	 * Finding out the valid cp block involves reading both
	 * sets (cp pack 1 and cp pack 2)
	 */
	cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

	/* The second checkpoint pack should start at the next segment */
	cp_start_blk_no += ((unsigned long long)1) <<
				le32_to_cpu(fsb->log_blocks_per_seg);
	cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

	if (cp1 && cp2) {
		if (ver_after(cp2_version, cp1_version))
			cur_page = cp2;
		else
			cur_page = cp1;
	} else if (cp1) {
		cur_page = cp1;
	} else if (cp2) {
		cur_page = cp2;
	} else {
		goto fail_no_cp;
	}

	cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
	memcpy(sbi->ckpt, cp_block, blk_size);

	if (cp_blks <= 1)
		goto done;

	cp_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	if (cur_page == cp2)
		cp_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg);

	for (i = 1; i < cp_blks; i++) {
		void *sit_bitmap_ptr;
		unsigned char *ckpt = (unsigned char *)sbi->ckpt;

		cur_page = get_meta_page(sbi, cp_blk_no + i);
		sit_bitmap_ptr = page_address(cur_page);
		memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
		f2fs_put_page(cur_page, 1);
	}
done:
	f2fs_put_page(cp1, 1);
	f2fs_put_page(cp2, 1);
	return 0;

fail_no_cp:
	kfree(sbi->ckpt);
	return -EINVAL;
}

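/*
 * Dirty directory tracking: each directory inode with dirty dentry
 * pages gets one dir_inode_entry on sbi->dir_inode_list, guarded by
 * dir_inode_lock, so that block_operations() can flush them all before
 * a checkpoint.
 */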
static int __add_dirty_inode(struct inode *inode, struct dir_inode_entry *new)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	if (is_inode_flag_set(F2FS_I(inode), FI_DIRTY_DIR))
		return -EEXIST;

	set_inode_flag(F2FS_I(inode), FI_DIRTY_DIR);
	F2FS_I(inode)->dirty_dir = new;
	list_add_tail(&new->list, &sbi->dir_inode_list);
	stat_inc_dirty_dir(sbi);
	return 0;
}

void set_dirty_dir_page(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct dir_inode_entry *new;
	int ret = 0;

	if (!S_ISDIR(inode->i_mode))
		return;

	new = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	new->inode = inode;
	INIT_LIST_HEAD(&new->list);

	spin_lock(&sbi->dir_inode_lock);
	ret = __add_dirty_inode(inode, new);
	inode_inc_dirty_dents(inode);
	SetPagePrivate(page);
	spin_unlock(&sbi->dir_inode_lock);

	if (ret)
		kmem_cache_free(inode_entry_slab, new);
}

void add_dirty_dir_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct dir_inode_entry *new =
			f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	int ret = 0;

	new->inode = inode;
	INIT_LIST_HEAD(&new->list);

	spin_lock(&sbi->dir_inode_lock);
	ret = __add_dirty_inode(inode, new);
	spin_unlock(&sbi->dir_inode_lock);

	if (ret)
		kmem_cache_free(inode_entry_slab, new);
}

void remove_dirty_dir_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct dir_inode_entry *entry;

	if (!S_ISDIR(inode->i_mode))
		return;

	spin_lock(&sbi->dir_inode_lock);
	if (get_dirty_dents(inode) ||
			!is_inode_flag_set(F2FS_I(inode), FI_DIRTY_DIR)) {
		spin_unlock(&sbi->dir_inode_lock);
		return;
	}

	entry = F2FS_I(inode)->dirty_dir;
	list_del(&entry->list);
	F2FS_I(inode)->dirty_dir = NULL;
	clear_inode_flag(F2FS_I(inode), FI_DIRTY_DIR);
	stat_dec_dirty_dir(sbi);
	spin_unlock(&sbi->dir_inode_lock);
	kmem_cache_free(inode_entry_slab, entry);

	/* Only from the recovery routine */
	if (is_inode_flag_set(F2FS_I(inode), FI_DELAY_IPUT)) {
		clear_inode_flag(F2FS_I(inode), FI_DELAY_IPUT);
		iput(inode);
	}
}

void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi)
{
	struct list_head *head;
	struct dir_inode_entry *entry;
	struct inode *inode;
retry:
	spin_lock(&sbi->dir_inode_lock);

	head = &sbi->dir_inode_list;
	if (list_empty(head)) {
		spin_unlock(&sbi->dir_inode_lock);
		return;
	}
	entry = list_entry(head->next, struct dir_inode_entry, list);
	inode = igrab(entry->inode);
	spin_unlock(&sbi->dir_inode_lock);
	if (inode) {
		filemap_fdatawrite(inode->i_mapping);
		iput(inode);
	} else {
		/*
		 * We should submit the bio, since several dentry pages
		 * in the freeing inode may still be under writeback.
		 */
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	}
	goto retry;
}

/*
 * Freeze all the FS-operations for checkpoint.
 */
static void block_operations(struct f2fs_sb_info *sbi)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	struct blk_plug plug;

	blk_start_plug(&plug);

retry_flush_dents:
	f2fs_lock_all(sbi);
	/* write all the dirty dentry pages */
	if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
		f2fs_unlock_all(sbi);
		sync_dirty_dir_inodes(sbi);
		goto retry_flush_dents;
	}

	/*
	 * POR: we should ensure that there are no dirty node pages
	 * until finishing nat/sit flush.
	 */
retry_flush_nodes:
	mutex_lock(&sbi->node_write);

	if (get_pages(sbi, F2FS_DIRTY_NODES)) {
		mutex_unlock(&sbi->node_write);
		sync_node_pages(sbi, 0, &wbc);
		goto retry_flush_nodes;
	}
	blk_finish_plug(&plug);
}

static void unblock_operations(struct f2fs_sb_info *sbi)
{
	mutex_unlock(&sbi->node_write);
	f2fs_unlock_all(sbi);
}

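/*
 * Sleep on cp_wait until the count of pages under writeback drops to
 * zero; the write end_io path (outside this file) is expected to wake
 * this queue as writeback completes.
 */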
static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);

		if (!get_pages(sbi, F2FS_WRITEBACK))
			break;

		io_schedule();
	}
	finish_wait(&sbi->cp_wait, &wait);
}

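/*
 * Write one complete CP pack.  Its on-disk layout, in block order, is:
 * checkpoint header, cp_payload blocks, orphan blocks (if any), data
 * segment summaries, node segment summaries (umount only), and finally
 * a second copy of the checkpoint header that validates the pack.
 */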
static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	nid_t last_nid = 0;
	block_t start_blk;
	struct page *cp_page;
	unsigned int data_sum_blocks, orphan_blocks;
	__u32 crc32 = 0;
	void *kaddr;
	int i;
	int cp_payload_blks = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);

	/*
	 * This avoids conducting wrong roll-forward operations and uses
	 * meta pages, so it should be called prior to sync_meta_pages below.
	 */
	discard_next_dnode(sbi);

	/* Flush all the NAT/SIT pages */
	while (get_pages(sbi, F2FS_DIRTY_META))
		sync_meta_pages(sbi, META, LONG_MAX);

	next_free_nid(sbi, &last_nid);

	/*
	 * modify checkpoint
	 * version number is already updated
	 */
	ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi));
	ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
	ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
	for (i = 0; i < 3; i++) {
		ckpt->cur_node_segno[i] =
			cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
		ckpt->cur_node_blkoff[i] =
			cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE));
		ckpt->alloc_type[i + CURSEG_HOT_NODE] =
				curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
	}
	for (i = 0; i < 3; i++) {
		ckpt->cur_data_segno[i] =
			cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
		ckpt->cur_data_blkoff[i] =
			cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA));
		ckpt->alloc_type[i + CURSEG_HOT_DATA] =
				curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
	}

	ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
	ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
	ckpt->next_free_nid = cpu_to_le32(last_nid);

	/* 2 cp + n data seg summary + orphan inode blocks */
	data_sum_blocks = npages_for_summary_flush(sbi);
	if (data_sum_blocks < 3)
		set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
	else
		clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);

	orphan_blocks = (sbi->n_orphans + F2FS_ORPHANS_PER_BLOCK - 1)
					/ F2FS_ORPHANS_PER_BLOCK;
	ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
			orphan_blocks);

	if (is_umount) {
		set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
		ckpt->cp_pack_total_block_count = cpu_to_le32(2 +
				cp_payload_blks + data_sum_blocks +
				orphan_blocks + NR_CURSEG_NODE_TYPE);
	} else {
		clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
		ckpt->cp_pack_total_block_count = cpu_to_le32(2 +
				cp_payload_blks + data_sum_blocks +
				orphan_blocks);
	}

	if (sbi->n_orphans)
		set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
	else
		clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);

	/* update SIT/NAT bitmap */
	get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
	get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));

	crc32 = f2fs_crc32(ckpt, le32_to_cpu(ckpt->checksum_offset));
	*((__le32 *)((unsigned char *)ckpt +
				le32_to_cpu(ckpt->checksum_offset)))
				= cpu_to_le32(crc32);

	start_blk = __start_cp_addr(sbi);

	/* write out checkpoint buffer at block 0 */
	cp_page = grab_meta_page(sbi, start_blk++);
	kaddr = page_address(cp_page);
	memcpy(kaddr, ckpt, (1 << sbi->log_blocksize));
	set_page_dirty(cp_page);
	f2fs_put_page(cp_page, 1);

	for (i = 1; i < 1 + cp_payload_blks; i++) {
		cp_page = grab_meta_page(sbi, start_blk++);
		kaddr = page_address(cp_page);
		memcpy(kaddr, (char *)ckpt + i * F2FS_BLKSIZE,
				(1 << sbi->log_blocksize));
		set_page_dirty(cp_page);
		f2fs_put_page(cp_page, 1);
	}

	if (sbi->n_orphans) {
		write_orphan_inodes(sbi, start_blk);
		start_blk += orphan_blocks;
	}

	write_data_summaries(sbi, start_blk);
	start_blk += data_sum_blocks;
	if (is_umount) {
		write_node_summaries(sbi, start_blk);
		start_blk += NR_CURSEG_NODE_TYPE;
	}

	/* write out the trailing checkpoint block */
	cp_page = grab_meta_page(sbi, start_blk);
	kaddr = page_address(cp_page);
	memcpy(kaddr, ckpt, (1 << sbi->log_blocksize));
	set_page_dirty(cp_page);
	f2fs_put_page(cp_page, 1);

	/* wait for writeback of previously submitted node/meta pages */
	wait_on_all_pages_writeback(sbi);

	filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LONG_MAX);
	filemap_fdatawait_range(META_MAPPING(sbi), 0, LONG_MAX);

	/* update user_block_counts */
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;

	/* Here, we have only one bio carrying the CP pack */
	sync_meta_pages(sbi, META_FLUSH, LONG_MAX);

	if (unlikely(!is_set_ckpt_flags(ckpt, CP_ERROR_FLAG))) {
		clear_prefree_segments(sbi);
		F2FS_RESET_SB_DIRT(sbi);
	}
}

/*
 * We guarantee that this checkpoint procedure will not fail.
 */
void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long long ckpt_ver;

	trace_f2fs_write_checkpoint(sbi->sb, is_umount, "start block_ops");

	mutex_lock(&sbi->cp_mutex);
	block_operations(sbi);

	trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish block_ops");

	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	f2fs_submit_merged_bio(sbi, NODE, WRITE);
	f2fs_submit_merged_bio(sbi, META, WRITE);

	/*
	 * update checkpoint pack index
	 * Increase the version number so that
	 * SIT entries and seg summaries are written at correct place
	 */
	ckpt_ver = cur_cp_version(ckpt);
	ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);

	/* write cached NAT/SIT entries to NAT/SIT area */
	flush_nat_entries(sbi);
	flush_sit_entries(sbi);

	/* unlock all the fs_lock[] in do_checkpoint() */
	do_checkpoint(sbi, is_umount);

	unblock_operations(sbi);
	mutex_unlock(&sbi->cp_mutex);

	stat_inc_cp_count(sbi->stat_info);
	trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish checkpoint");
}

void init_orphan_info(struct f2fs_sb_info *sbi)
{
	spin_lock_init(&sbi->orphan_inode_lock);
	INIT_LIST_HEAD(&sbi->orphan_inode_list);
	sbi->n_orphans = 0;
	/*
	 * Considering 512 blocks in a segment, 8 blocks are needed for cp
	 * and log segment summaries.  The remaining blocks are used to keep
	 * orphan entries; with the limitation of one reserved segment for
	 * the cp pack, we can have at most 1020 * 504 orphan entries.
	 */
	sbi->max_orphans = (sbi->blocks_per_seg - 2 - NR_CURSEG_TYPE)
				* F2FS_ORPHANS_PER_BLOCK;
}

int __init create_checkpoint_caches(void)
{
	orphan_entry_slab = f2fs_kmem_cache_create("f2fs_orphan_entry",
			sizeof(struct orphan_inode_entry));
	if (!orphan_entry_slab)
		return -ENOMEM;
	inode_entry_slab = f2fs_kmem_cache_create("f2fs_dirty_dir_entry",
			sizeof(struct dir_inode_entry));
	if (!inode_entry_slab) {
		kmem_cache_destroy(orphan_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_checkpoint_caches(void)
{
	kmem_cache_destroy(orphan_entry_slab);
	kmem_cache_destroy(inode_entry_slab);
}