/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

#define on_build_free_nids(nmi)	mutex_is_locked(&(nmi)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;

static inline bool available_free_memory(struct f2fs_nm_info *nm_i, int type)
{
	struct sysinfo val;
	unsigned long mem_size = 0;

	si_meminfo(&val);
	if (type == FREE_NIDS)
		mem_size = nm_i->fcnt * sizeof(struct free_nid);
	else if (type == NAT_ENTRIES)
		mem_size += nm_i->nat_cnt * sizeof(struct nat_entry);
	mem_size >>= 12;
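	/*
	 * Unit note (a sketch, not from the original comments): mem_size is
	 * in bytes, and >> 12 converts it to 4KB-page units so that it is
	 * comparable with val.totalram, which si_meminfo() reports in pages.
	 * The >> 11 below is presumably half of a >> 10 total budget, i.e.
	 * each cache type gets an equal share, per the 50:50 comment.
	 */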

	/* give 50:50 memory for free nids and nat caches respectively */
	return (mem_size < ((val.totalram * nm_i->ram_thresh) >> 11));
}

static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);

	/* Dirty src_page means that it is already the new target NAT page. */
	if (PageDirty(src_page))
		return src_page;

	dst_page = grab_meta_page(sbi, dst_off);

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

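	/*
	 * F2FS keeps two on-disk copies of every NAT block and alternates
	 * between them across checkpoints; set_to_next_nat() records that
	 * the "next" copy is now the valid one for this nid's block.
	 */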
	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	int is_cp = 1;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !e->checkpointed)
		is_cp = 0;
	read_unlock(&nm_i->nat_tree_lock);
	return is_cp;
}

bool fsync_mark_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool fsync_done = false;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e)
		fsync_done = e->fsync_done;
	read_unlock(&nm_i->nat_tree_lock);
	return fsync_done;
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	new->checkpointed = true;
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;
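	/*
	 * grab_nat_entry() allocates with GFP_ATOMIC while the tree write
	 * lock is held, so it may fail; on failure we drop the lock and
	 * retry the whole lookup, giving the allocator another chance.
	 */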
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
		nat_set_ino(e, le32_to_cpu(ne->ino));
		nat_set_version(e, ne->version);
	}
	write_unlock(&nm_i->nat_tree_lock);
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		e->ni = *ni;
		f2fs_bug_on(ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * When a nid is reallocated, the previous nat entry may
		 * remain in the nat cache, so reinitialize it with the new
		 * information.
		 */
		e->ni = *ni;
		f2fs_bug_on(ni->blk_addr != NULL_ADDR);
	}

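	/*
	 * blkaddr life cycle (sketch): NULL_ADDR (free) -> NEW_ADDR
	 * (allocated, not yet written) -> a real block address ->
	 * NULL_ADDR again on truncation. The checks below reject
	 * transitions this state machine cannot produce.
	 */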
	/* sanity check */
	f2fs_bug_on(nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment the version number, as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	e = __lookup_nat_cache(nm_i, ni->ino);
	if (e)
		e->fsync_done = fsync_done;
	write_unlock(&nm_i->nat_tree_lock);
}

int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (available_free_memory(nm_i, NAT_ENTRIES))
		return 0;

	write_lock(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	write_unlock(&nm_i->nat_tree_lock);
	return nr_shrink;
}

/*
 * This function always returns success.
 */
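/*
 * Lookup order: the in-memory nat cache first, then the NAT journal kept
 * in the hot data summary block, and finally the on-disk NAT block; the
 * result of either slow path is inserted back into the cache.
 */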
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	read_unlock(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}

/*
 * The maximum depth is four.
 * offset[0] holds the offset within the raw inode.
 */
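/*
 * Example (sketch): block indices below ADDRS_PER_INODE(fi) resolve
 * inside the inode itself (level 0); the next 2 * ADDRS_PER_BLOCK
 * indices go through the two direct node blocks (level 1); the next
 * 2 * ADDRS_PER_BLOCK * NIDS_PER_BLOCK through the two indirect blocks
 * (level 2); everything beyond that through the double-indirect block
 * (level 3). offset[] records the slot chosen at each level, and
 * noffset[] the logical node-block number on the path.
 */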
static int get_node_path(struct f2fs_inode_info *fi, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(fi);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op(), unless it performs a read-only lookup, in which case
 * no lock is needed.
 */
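/*
 * mode is one of ALLOC_NODE (allocate any missing node page on the
 * path), LOOKUP_NODE (fail if a node page on the path is missing), or
 * LOOKUP_NODE_RA (lookup plus readahead of the siblings of the
 * last-level node).
 */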
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}
	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i], NULL);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}

static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		f2fs_bug_on(ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	f2fs_bug_on(ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			dn->node_page->index, dn->node_page->index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

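	/*
	 * The return value is the number of node blocks freed in this
	 * subtree, counting the indirect node itself; a hole (nid == 0)
	 * reports a fully freed subtree of NIDS_PER_BLOCK + 1 so the
	 * caller's child_nofs bookkeeping stays aligned.
	 */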
	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* reference count will be increased */
		pages[i] = get_node_page(sbi, nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(F2FS_I(inode), from, offset, noffset);
restart:
	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
				f2fs_put_page(page, 1);
				goto restart;
			}
			f2fs_wait_on_page_writeback(page, NODE);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

int truncate_xattr_node(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;

	if (!nid)
		return 0;

	npage = get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	F2FS_I(inode)->i_xattr_nid = 0;

	/* need to do checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));

	set_new_dnode(&dn, inode, page, npage, nid);

	if (page)
		dn.inode_page_locked = true;
	truncate_node(&dn);
	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
void remove_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	nid_t ino = inode->i_ino;
	struct dnode_of_data dn;

	page = get_node_page(sbi, ino);
	if (IS_ERR(page))
		return;

	if (truncate_xattr_node(inode, page)) {
		f2fs_put_page(page, 1);
		return;
	}
	/* 0 is possible, after f2fs_new_inode() is failed */
	f2fs_bug_on(inode->i_blocks != 0 && inode->i_blocks != 1);
	set_new_dnode(&dn, inode, page, page, ino);
	truncate_node(&dn);
}

struct page *new_inode_page(struct inode *inode, const struct qstr *name)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return new_node_page(&dn, 0, NULL);
}

struct page *new_node_page(struct dnode_of_data *dn,
				unsigned int ofs, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = grab_cache_page_write_begin(NODE_MAPPING(sbi),
					dn->nid, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
		err = -ENOSPC;
		goto fail;
	}

	get_node_info(sbi, dn->nid, &old_ni);

	/* Reinitialize old_ni with new node page */
	f2fs_bug_on(old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(dn->inode, page);
	SetPageUptodate(page);
	set_page_dirty(page);

	if (f2fs_has_xattr_block(ofs))
		F2FS_I(dn->inode)->i_xattr_nid = dn->nid;

	dn->node_page = page;
	if (ipage)
		update_inode(dn->inode, ipage);
	else
		sync_inode_page(dn);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * The caller's cleanup depends on the return value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE: f2fs_put_page(page, 1)
 * error: nothing
 */
static int read_node_page(struct page *page, int rw)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	return f2fs_submit_page_bio(sbi, page, ni.blk_addr, rw);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	apage = find_get_page(NODE_MAPPING(sbi), nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	if (err == 0)
		f2fs_put_page(apage, 0);
	else if (err == LOCKED_PAGE)
		f2fs_put_page(apage, 1);
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	struct page *page;
	int err;
repeat:
	page = grab_cache_page_write_begin(NODE_MAPPING(sbi),
					nid, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto got_it;

	lock_page(page);
	if (unlikely(!PageUptodate(page) || nid != nid_of_node(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
got_it:
	mark_page_accessed(page);
	return page;
}

/*
 * Return a locked page for the desired node page, and readahead up to
 * MAX_RA_NODE of its sibling node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
	struct blk_plug plug;
	struct page *page;
	int err, i, end;
	nid_t nid;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto page_hit;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);

	lock_page(page);
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
page_hit:
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	mark_page_accessed(page);
	return page;
}

void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		update_inode_page(dn->inode);
	}
}

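/*
 * When ino is zero, all dirty node pages are flushed in three passes:
 * indirect nodes, then dentry dnodes, then file dnodes. When ino is
 * non-zero (the fsync path), only that inode's dnodes are written, with
 * fsync/dentry marks set for recovery.
 */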
int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode, we should not skip writing
			 * node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				int mark = !is_checkpointed_node(sbi, ino);
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page, mark);
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}
			NODE_MAPPING(sbi)->a_ops->writepage(page, wbc);
			wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_merged_bio(sbi, NODE, WRITE);
	return nwritten;
}

int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index = 0, end = LONG_MAX;
	struct pagevec pvec;
	int ret2 = 0, ret = 0;

	pagevec_init(&pvec, 0);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_WRITEBACK,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (unlikely(page->index > end))
				continue;

			if (ino && ino_of_node(page) == ino) {
				f2fs_wait_on_page_writeback(page, NODE);
				if (TestClearPageError(page))
					ret = -EIO;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
		ret2 = -ENOSPC;
	if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags)))
		ret2 = -EIO;
	if (!ret)
		ret = ret2;
	return ret;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.type = NODE,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	if (unlikely(sbi->por_doing))
		goto redirty_out;

	f2fs_wait_on_page_writeback(page, NODE);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (wbc->for_reclaim)
		goto redirty_out;

	mutex_lock(&sbi->node_write);
	set_page_writeback(page);
	write_node_page(sbi, page, &fio, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	mutex_unlock(&sbi->node_write);
	unlock_page(page);
	return 0;

redirty_out:
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	wbc->pages_skipped++;
	account_page_redirty(page);
	set_page_dirty(page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_node_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	long diff;

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	diff = nr_pages_to_write(sbi, NODE, wbc);
	wbc->sync_mode = WB_SYNC_NONE;
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	trace_f2fs_set_page_dirty(page, NODE);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}

static void f2fs_invalidate_node_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (PageDirty(page))
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_node_page,
	.releasepage	= f2fs_release_node_page,
};

static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
						nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
						struct free_nid *i)
{
	list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
	kmem_cache_free(free_nid_slab, i);
}

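/*
 * Returns 1 when a new free nid was cached, 0 when the nid was skipped
 * (nid 0, already allocated, or already in the list), and -1 when the
 * free nid cache is over its memory budget.
 */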
static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
{
	struct free_nid *i;
	struct nat_entry *ne;
	bool allocated = false;

	if (!available_free_memory(nm_i, FREE_NIDS))
		return -1;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return 0;

	if (build) {
		/* do not add allocated nids */
		read_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne &&
			(!ne->checkpointed || nat_get_blkaddr(ne) != NULL_ADDR))
			allocated = true;
		read_unlock(&nm_i->nat_tree_lock);
		if (allocated)
			return 0;
	}

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = NID_NEW;

	spin_lock(&nm_i->free_nid_list_lock);
	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}

static void scan_nat_page(struct f2fs_nm_info *nm_i,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int i;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {

		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		f2fs_bug_on(blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR) {
			if (add_free_nid(nm_i, start_nid, true) < 0)
				break;
		}
	}
}

static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i = 0;
	nid_t nid = nm_i->next_scan_nid;

	/* Enough entries */
	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
		return;

	/* readahead nat pages to be scanned */
	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, META_NAT);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		scan_nat_page(nm_i, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (i++ == FREE_NID_PAGES)
			break;
	}

	/* go to the next free nat pages to find free nids abundantly */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(nm_i, nid, true);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid can be used as an ino as well as a nid when an inode
 * is created.
 */
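/*
 * Flow (sketch): take the first NID_NEW entry off the free nid list; if
 * the list is unusable, or build_free_nids() is currently running,
 * rebuild the free nid cache under build_lock and retry.
 */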
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
	struct list_head *this;
retry:
	if (unlikely(sbi->total_valid_node_count + 1 >= nm_i->max_nid))
		return false;

	spin_lock(&nm_i->free_nid_list_lock);

	/* We should not use stale free nids created by build_free_nids */
	if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
		f2fs_bug_on(list_empty(&nm_i->free_nid_list));
		list_for_each(this, &nm_i->free_nid_list) {
			i = list_entry(this, struct free_nid, list);
			if (i->state == NID_NEW)
				break;
		}

		f2fs_bug_on(i->state != NID_NEW);
		*nid = i->nid;
		i->state = NID_ALLOC;
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	/* Let's scan nat pages and its caches to get free nids */
	mutex_lock(&nm_i->build_lock);
	build_free_nids(sbi);
	mutex_unlock(&nm_i->build_lock);
	goto retry;
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(!i || i->state != NID_ALLOC);
	__del_from_free_nid_list(nm_i, i);
	spin_unlock(&nm_i->free_nid_list_lock);
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	if (!nid)
		return;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(!i || i->state != NID_ALLOC);
	if (!available_free_memory(nm_i, FREE_NIDS)) {
		__del_from_free_nid_list(nm_i, i);
	} else {
		i->state = NID_NEW;
		nm_i->fcnt++;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}

void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
		struct f2fs_summary *sum, struct node_info *ni,
		block_t new_blkaddr)
{
	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
	set_node_addr(sbi, ni, new_blkaddr, false);
	clear_node_page_dirty(page);
}

void recover_inline_xattr(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	if (!f2fs_has_inline_xattr(inode))
		return;

	if (!IS_INODE(page))
		return;

	ri = F2FS_INODE(page);
	if (!(ri->i_inline & F2FS_INLINE_XATTR))
		return;

	ipage = get_node_page(sbi, inode->i_ino);
	f2fs_bug_on(IS_ERR(ipage));

	dst_addr = inline_xattr_addr(ipage);
	src_addr = inline_xattr_addr(page);
	inline_size = inline_xattr_size(inode);

	memcpy(dst_addr, src_addr, inline_size);

	update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
}

bool recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid = nid_of_node(page);
	struct node_info ni;

	recover_inline_xattr(inode, page);

	if (!f2fs_has_xattr_block(ofs_of_node(page)))
		return false;

	/* 1: invalidate the previous xattr nid */
	if (!prev_xnid)
		goto recover_xnid;

	/* Deallocate node address */
	get_node_info(sbi, prev_xnid, &ni);
	f2fs_bug_on(ni.blk_addr == NULL_ADDR);
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: allocate new xattr nid */
	if (unlikely(!inc_valid_node_count(sbi, inode)))
		f2fs_bug_on(1);

	remove_free_nid(NM_I(sbi), new_xnid);
	get_node_info(sbi, new_xnid, &ni);
	ni.ino = inode->i_ino;
	set_node_addr(sbi, &ni, NEW_ADDR, false);
	F2FS_I(inode)->i_xattr_nid = new_xnid;

	/* 3: update xattr blkaddr */
	refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
	set_node_addr(sbi, &ni, blkaddr, false);

	update_inode_page(inode);
	return true;
}

int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	ipage = grab_cache_page(NODE_MAPPING(sbi), ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from free nid list */
	remove_free_nid(NM_I(sbi), ino);

	get_node_info(sbi, ino, &old_ni);
	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(!inc_valid_node_count(sbi, NULL)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	f2fs_put_page(ipage, 1);
	return 0;
}

/*
 * ra_sum_pages() merges contiguous pages into one bio and submits them;
 * the pre-read pages are linked into the pages list.
 */
static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
				int start, int nrpages)
{
	struct page *page;
	int page_idx = start;
	struct f2fs_io_info fio = {
		.type = META,
		.rw = READ_SYNC | REQ_META | REQ_PRIO
	};

	for (; page_idx < start + nrpages; page_idx++) {
		/* allocate a temporary page to read node summary info */
		page = alloc_page(GFP_F2FS_ZERO);
		if (!page)
			break;

		lock_page(page);
		page->index = page_idx;
		list_add_tail(&page->lru, pages);
	}

	list_for_each_entry(page, pages, lru)
		f2fs_submit_page_mbio(sbi, page, page->index, &fio);

	f2fs_submit_merged_bio(sbi, META, READ);

	return page_idx - start;
}
1670
int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	struct page *page, *tmp;
	block_t addr;
	int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
	int i, last_offset, nrpages, err = 0;
	LIST_HEAD(page_list);

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; !err && i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = min(last_offset - i, bio_blocks);

		/* read ahead node pages */
		nrpages = ra_sum_pages(sbi, &page_list, addr, nrpages);
		if (!nrpages)
			return -ENOMEM;

		list_for_each_entry_safe(page, tmp, &page_list, lru) {
			if (err)
				goto skip;

			lock_page(page);
			if (unlikely(!PageUptodate(page))) {
				err = -EIO;
			} else {
				rn = F2FS_NODE(page);
				sum_entry->nid = rn->footer.nid;
				sum_entry->version = 0;
				sum_entry->ofs_in_node = 0;
				sum_entry++;
			}
			unlock_page(page);
skip:
			list_del(&page->lru);
			__free_pages(page, 0);
		}
	}
	return err;
}

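/*
 * When the NAT journal in the hot data summary block is full, pull all
 * journalled entries back into the in-memory NAT cache (marking them
 * dirty) so that journal space can be reused.  Returns true if it did so.
 */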
static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);

	if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
		mutex_unlock(&curseg->curseg_mutex);
		return false;
	}

	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);
retry:
		write_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne) {
			__set_nat_cache_dirty(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
			continue;
		}
		ne = grab_nat_entry(nm_i, nid);
		if (!ne) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
		nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
		nat_set_version(ne, raw_ne.version);
		__set_nat_cache_dirty(nm_i, ne);
		write_unlock(&nm_i->nat_tree_lock);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
	return true;
}

/*
 * This function is called during the checkpointing process to flush all
 * dirty NAT entries either into the NAT journal or into the on-disk NAT
 * blocks.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct list_head *cur, *n;
	struct page *page = NULL;
	struct f2fs_nat_block *nat_blk = NULL;
	nid_t start_nid = 0, end_nid = 0;
	bool flushed;

	flushed = flush_nats_in_journal(sbi);

	if (!flushed)
		mutex_lock(&curseg->curseg_mutex);

	/* 1) flush dirty nat caches */
	list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
		struct nat_entry *ne;
		nid_t nid;
		struct f2fs_nat_entry raw_ne;
		int offset = -1;
		block_t new_blkaddr;

		ne = list_entry(cur, struct nat_entry, list);
		nid = nat_get_nid(ne);

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;
		if (flushed)
			goto to_nat_page;

		/* if there is room for nat entries in curseg->sum_blk */
		offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
		if (offset >= 0) {
			raw_ne = nat_in_journal(sum, offset);
			goto flush_now;
		}
to_nat_page:
		if (!page || (start_nid > nid || nid > end_nid)) {
			if (page) {
				f2fs_put_page(page, 1);
				page = NULL;
			}
			start_nid = START_NID(nid);
			end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;

			/*
			 * get the next NAT block page: dirty, with an
			 * increased reference count, mapped, and locked
			 */
			page = get_next_nat_page(sbi, start_nid);
			nat_blk = page_address(page);
		}

		f2fs_bug_on(!nat_blk);
		raw_ne = nat_blk->entries[nid - start_nid];
flush_now:
		new_blkaddr = nat_get_blkaddr(ne);

		raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
		raw_ne.block_addr = cpu_to_le32(new_blkaddr);
		raw_ne.version = nat_get_version(ne);

		if (offset < 0) {
			nat_blk->entries[nid - start_nid] = raw_ne;
		} else {
			nat_in_journal(sum, offset) = raw_ne;
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		}

		if (nat_get_blkaddr(ne) == NULL_ADDR &&
				add_free_nid(NM_I(sbi), nid, false) <= 0) {
			write_lock(&nm_i->nat_tree_lock);
			__del_from_nat_cache(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
		} else {
			write_lock(&nm_i->nat_tree_lock);
			__clear_nat_cache_dirty(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
		}
	}
	if (!flushed)
		mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(page, 1);
}

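/*
 * Illustrative sketch for flush_nat_entries() (values assumed): if
 * NAT_ENTRY_PER_BLOCK were 455, a dirty entry for nid 1000 would land
 * in the NAT block starting at START_NID(1000) == 910, at index
 * 1000 - 910 == 90, and the same locked NAT page would be reused for
 * every dirty nid in [910, 1364].
 */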
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes the pair segment, so divide by 2 */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);

	/* unused nids: 0, node, meta (and root is counted as a valid node) */
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks - 3;
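	/*
	 * Worked example (assumed geometry): with 4KB blocks a raw NAT
	 * entry is 9 bytes, so NAT_ENTRY_PER_BLOCK is 455; one NAT
	 * segment pair of 512 blocks then yields
	 * max_nid = 455 * 512 - 3 = 232957.
	 */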
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	INIT_LIST_HEAD(&nm_i->dirty_nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	rwlock_init(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	return 0;
}

int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}

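/*
 * Tear down the node manager: drop every cached free nid, empty the NAT
 * cache in NATVEC_SIZE-sized radix-tree gangs, then free the NAT bitmap.
 */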
void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		f2fs_bug_on(i->state == NID_ALLOC);
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
	}
	f2fs_bug_on(nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	write_lock(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
			nid, NATVEC_SIZE, natvec))) {
		unsigned idx;
		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++)
			__del_from_nat_cache(nm_i, natvec[idx]);
	}
	f2fs_bug_on(nm_i->nat_cnt);
	write_unlock(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}

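/* slab caches backing the in-memory nat_entry and free_nid objects */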
int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		return -ENOMEM;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab) {
		kmem_cache_destroy(nat_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}