/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

#define on_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;

static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}

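/*
 * F2FS keeps two physical copies of every NAT block and ping-pongs between
 * them across checkpoints: next_nat_addr() names the inactive copy, and
 * set_to_next_nat() flips the version bitmap so that copy becomes the
 * current one at the next checkpoint.
 */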
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);

	/* Dirty src_page means that it is already the new target NAT page. */
	if (PageDirty(src_page))
		return src_page;

	dst_page = grab_meta_page(sbi, dst_off);

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

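/*
 * A node is "checkpointed" if its cached nat entry has not been updated
 * since the last checkpoint; sync_node_pages() uses this to decide whether
 * an inode page needs a dentry mark for roll-forward recovery.
 */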
int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	int is_cp = 1;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !e->checkpointed)
		is_cp = 0;
	read_unlock(&nm_i->nat_tree_lock);
	return is_cp;
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	new->checkpointed = true;
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
		nat_set_ino(e, le32_to_cpu(ne->ino));
		nat_set_version(e, ne->version);
	}
	write_unlock(&nm_i->nat_tree_lock);
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		e->ni = *ni;
		f2fs_bug_on(ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * When a nid is reallocated, the previous nat entry can
		 * remain in the nat cache. So, reinitialize it with the
		 * new information.
		 */
		e->ni = *ni;
		f2fs_bug_on(ni->blk_addr != NULL_ADDR);
	}

	/* sanity check */
	f2fs_bug_on(nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment the version number, as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	__set_nat_cache_dirty(nm_i, e);
	write_unlock(&nm_i->nat_tree_lock);
}

int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (nm_i->nat_cnt <= NM_WOUT_THRESHOLD)
		return 0;

	write_lock(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	write_unlock(&nm_i->nat_tree_lock);
	return nr_shrink;
}

/*
 * This function always returns success.
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	read_unlock(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}

/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
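/*
 * Illustrative example, assuming the usual 4KB-block geometry where
 * ADDRS_PER_INODE(fi) = 923, ADDRS_PER_BLOCK = 1018 and
 * NIDS_PER_BLOCK = 1018: for block 2000, 2000 - 923 = 1077 is past the
 * 1018 slots of the first direct node, so 1077 - 1018 = 59 lands in the
 * second direct node:
 *	offset[0] = NODE_DIR2_BLOCK, offset[1] = 59,
 *	noffset[1] = 2 (node offset of DIR2 in the node tree), level = 1.
 */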
static int get_node_path(struct f2fs_inode_info *fi, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(fi);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			offset[n - 2] * (dptrs_per_blk + 1) +
			offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set to ALLOC_NODE. For the LOOKUP_NODE
 * modes, we don't need to care about the lock.
 */
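/*
 * A minimal call sketch (illustrative, mirroring f2fs's data-path callers):
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (err)
 *		return err;
 *	blkaddr = dn.data_blkaddr;	// block address for "index"
 *	f2fs_put_dnode(&dn);
 */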
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}
	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i], NULL);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}

static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		f2fs_bug_on(ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	f2fs_bug_on(ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode);
	set_node_addr(sbi, &ni, NULL_ADDR);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			dn->node_page->index, dn->node_page->index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

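/*
 * Free the subtree rooted at dn->nid: ofs is the first child slot to
 * consider, nofs is this node's offset in the file's node tree, and depth
 * tells how many levels remain. Returns the number of node slots freed,
 * where a hole (dn->nid == 0) counts as a fully freed subtree.
 */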
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* the reference count will be increased */
		pages[i] = get_node_page(sbi, nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(F2FS_I(inode), from, offset, noffset);
restart:
	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
				f2fs_put_page(page, 1);
				goto restart;
			}
			f2fs_wait_on_page_writeback(page, NODE);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

int truncate_xattr_node(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;

	if (!nid)
		return 0;

	npage = get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	F2FS_I(inode)->i_xattr_nid = 0;

	/* need to do checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));

	set_new_dnode(&dn, inode, page, npage, nid);

	if (page)
		dn.inode_page_locked = true;
	truncate_node(&dn);
	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
void remove_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	nid_t ino = inode->i_ino;
	struct dnode_of_data dn;

	page = get_node_page(sbi, ino);
	if (IS_ERR(page))
		return;

	if (truncate_xattr_node(inode, page)) {
		f2fs_put_page(page, 1);
		return;
	}
	/* 0 is possible, after f2fs_new_inode() fails */
	f2fs_bug_on(inode->i_blocks != 0 && inode->i_blocks != 1);
	set_new_dnode(&dn, inode, page, page, ino);
	truncate_node(&dn);
}

struct page *new_inode_page(struct inode *inode, const struct qstr *name)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return new_node_page(&dn, 0, NULL);
}

struct page *new_node_page(struct dnode_of_data *dn,
				unsigned int ofs, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = grab_cache_page_write_begin(NODE_MAPPING(sbi),
					dn->nid, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
		err = -ENOSPC;
		goto fail;
	}

	get_node_info(sbi, dn->nid, &old_ni);

	/* Reinitialize old_ni with new node page */
	f2fs_bug_on(old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;
	set_node_addr(sbi, &new_ni, NEW_ADDR);

	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(dn->inode, page);
	SetPageUptodate(page);
	set_page_dirty(page);

	if (f2fs_has_xattr_block(ofs))
		F2FS_I(dn->inode)->i_xattr_nid = dn->nid;

	dn->node_page = page;
	if (ipage)
		update_inode(dn->inode, ipage);
	else
		sync_inode_page(dn);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * Caller should act on the return value as follows:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE: f2fs_put_page(page, 1)
 * error: nothing
 */
static int read_node_page(struct page *page, int rw)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	return f2fs_submit_page_bio(sbi, page, ni.blk_addr, rw);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	apage = find_get_page(NODE_MAPPING(sbi), nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	if (err == 0)
		f2fs_put_page(apage, 0);
	else if (err == LOCKED_PAGE)
		f2fs_put_page(apage, 1);
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	struct page *page;
	int err;
repeat:
	page = grab_cache_page_write_begin(NODE_MAPPING(sbi),
					nid, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto got_it;

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
got_it:
	f2fs_bug_on(nid != nid_of_node(page));
	mark_page_accessed(page);
	return page;
}

/*
 * Return a locked page for the desired node page.
 * And, readahead MAX_RA_NODE number of node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
	struct blk_plug plug;
	struct page *page;
	int err, i, end;
	nid_t nid;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto page_hit;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);

	lock_page(page);
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
page_hit:
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	mark_page_accessed(page);
	return page;
}

void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		update_inode_page(dn->inode);
	}
}

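/*
 * Write back dirty node pages in three passes: indirect nodes first, then
 * dentry dnodes, then file dnodes. When ino is nonzero (the fsync path),
 * the scan starts at the last pass and only that inode's dnodes are
 * written, stamped with fsync/dentry marks for roll-forward recovery.
 * Returns the number of marked dnodes written.
 */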
int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode, we should not skip writing
			 * node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				int mark = !is_checkpointed_node(sbi, ino);
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page, mark);
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}
			NODE_MAPPING(sbi)->a_ops->writepage(page, wbc);
			wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_merged_bio(sbi, NODE, WRITE);
	return nwritten;
}

int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index = 0, end = LONG_MAX;
	struct pagevec pvec;
	int ret2 = 0, ret = 0;

	pagevec_init(&pvec, 0);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_WRITEBACK,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (unlikely(page->index > end))
				continue;

			if (ino && ino_of_node(page) == ino) {
				f2fs_wait_on_page_writeback(page, NODE);
				if (TestClearPageError(page))
					ret = -EIO;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
		ret2 = -ENOSPC;
	if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags)))
		ret2 = -EIO;
	if (!ret)
		ret = ret2;
	return ret;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.type = NODE,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	if (unlikely(sbi->por_doing))
		goto redirty_out;

	f2fs_wait_on_page_writeback(page, NODE);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (wbc->for_reclaim)
		goto redirty_out;

	mutex_lock(&sbi->node_write);
	set_page_writeback(page);
	write_node_page(sbi, page, &fio, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr);
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	mutex_unlock(&sbi->node_write);
	unlock_page(page);
	return 0;

redirty_out:
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	wbc->pages_skipped++;
	account_page_redirty(page);
	set_page_dirty(page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_node_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	long diff;

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	diff = nr_pages_to_write(sbi, NODE, wbc);
	wbc->sync_mode = WB_SYNC_NONE;
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	trace_f2fs_set_page_dirty(page, NODE);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}

static void f2fs_invalidate_node_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (PageDirty(page))
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_node_page,
	.releasepage	= f2fs_release_node_page,
};

static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
						nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
						struct free_nid *i)
{
	list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
	kmem_cache_free(free_nid_slab, i);
}

static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
{
	struct free_nid *i;
	struct nat_entry *ne;
	bool allocated = false;

	if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
		return -1;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return 0;

	if (build) {
		/* do not add allocated nids */
		read_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne &&
			(!ne->checkpointed || nat_get_blkaddr(ne) != NULL_ADDR))
			allocated = true;
		read_unlock(&nm_i->nat_tree_lock);
		if (allocated)
			return 0;
	}

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = NID_NEW;

	spin_lock(&nm_i->free_nid_list_lock);
	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}

static void scan_nat_page(struct f2fs_nm_info *nm_i,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int i;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {

		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		f2fs_bug_on(blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR) {
			if (add_free_nid(nm_i, start_nid, true) < 0)
				break;
		}
	}
}

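/*
 * Scan a batch of FREE_NID_PAGES NAT blocks (after readahead) for entries
 * whose block address is NULL_ADDR, and also harvest free nids sitting in
 * the current summary journal, so alloc_nid() always has candidates.
 */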
static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i = 0;
	nid_t nid = nm_i->next_scan_nid;

	/* Enough entries */
	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
		return;

	/* readahead nat pages to be scanned */
	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, META_NAT);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		scan_nat_page(nm_i, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (i++ == FREE_NID_PAGES)
			break;
	}

	/* go to the next free nat pages to find free nids abundantly */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(nm_i, nid, true);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid could be used as an ino as well as a nid when an
 * inode is created.
 */
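/*
 * Illustrative call pattern (mirrors get_dnode_of_data() above):
 *
 *	nid_t nid;
 *
 *	if (!alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	page = new_node_page(dn, noffset, NULL);
 *	if (IS_ERR(page))
 *		alloc_nid_failed(sbi, nid);	// give the nid back
 *	else
 *		alloc_nid_done(sbi, nid);	// the nid is now in use
 */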
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
	struct list_head *this;
retry:
	if (unlikely(sbi->total_valid_node_count + 1 >= nm_i->max_nid))
		return false;

	spin_lock(&nm_i->free_nid_list_lock);

	/* We should not use stale free nids created by build_free_nids */
	if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
		f2fs_bug_on(list_empty(&nm_i->free_nid_list));
		list_for_each(this, &nm_i->free_nid_list) {
			i = list_entry(this, struct free_nid, list);
			if (i->state == NID_NEW)
				break;
		}

		f2fs_bug_on(i->state != NID_NEW);
		*nid = i->nid;
		i->state = NID_ALLOC;
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	/* Let's scan nat pages and its caches to get free nids */
	mutex_lock(&nm_i->build_lock);
	build_free_nids(sbi);
	mutex_unlock(&nm_i->build_lock);
	goto retry;
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(!i || i->state != NID_ALLOC);
	__del_from_free_nid_list(nm_i, i);
	spin_unlock(&nm_i->free_nid_list_lock);
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	if (!nid)
		return;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(!i || i->state != NID_ALLOC);
	if (nm_i->fcnt > 2 * MAX_FREE_NIDS) {
		__del_from_free_nid_list(nm_i, i);
	} else {
		i->state = NID_NEW;
		nm_i->fcnt++;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}

void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
		struct f2fs_summary *sum, struct node_info *ni,
		block_t new_blkaddr)
{
	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
	set_node_addr(sbi, ni, new_blkaddr);
	clear_node_page_dirty(page);
}

void recover_inline_xattr(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	if (!f2fs_has_inline_xattr(inode))
		return;

	if (!IS_INODE(page))
		return;

	ri = F2FS_INODE(page);
	if (!(ri->i_inline & F2FS_INLINE_XATTR))
		return;

	ipage = get_node_page(sbi, inode->i_ino);
	f2fs_bug_on(IS_ERR(ipage));

	dst_addr = inline_xattr_addr(ipage);
	src_addr = inline_xattr_addr(page);
	inline_size = inline_xattr_size(inode);

	memcpy(dst_addr, src_addr, inline_size);

	update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
}

bool recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid = nid_of_node(page);
	struct node_info ni;

	recover_inline_xattr(inode, page);

	if (!f2fs_has_xattr_block(ofs_of_node(page)))
		return false;

	/* 1: invalidate the previous xattr nid */
	if (!prev_xnid)
		goto recover_xnid;

	/* Deallocate node address */
	get_node_info(sbi, prev_xnid, &ni);
	f2fs_bug_on(ni.blk_addr == NULL_ADDR);
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode);
	set_node_addr(sbi, &ni, NULL_ADDR);

recover_xnid:
	/* 2: allocate new xattr nid */
	if (unlikely(!inc_valid_node_count(sbi, inode)))
		f2fs_bug_on(1);

	remove_free_nid(NM_I(sbi), new_xnid);
	get_node_info(sbi, new_xnid, &ni);
	ni.ino = inode->i_ino;
	set_node_addr(sbi, &ni, NEW_ADDR);
	F2FS_I(inode)->i_xattr_nid = new_xnid;

	/* 3: update xattr blkaddr */
	refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
	set_node_addr(sbi, &ni, blkaddr);

	update_inode_page(inode);
	return true;
}

int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	ipage = grab_cache_page(NODE_MAPPING(sbi), ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from free nid list */
	remove_free_nid(NM_I(sbi), ino);

	get_node_info(sbi, ino, &old_ni);
	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(!inc_valid_node_count(sbi, NULL)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR);
	inc_valid_inode_count(sbi);
	f2fs_put_page(ipage, 1);
	return 0;
}
1603
Changman Leeb1a94e82013-11-15 10:42:51 +09001604/*
1605 * ra_sum_pages() merge contiguous pages into one bio and submit.
1606 * these pre-readed pages are linked in pages list.
1607 */
1608static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
1609 int start, int nrpages)
1610{
1611 struct page *page;
1612 int page_idx = start;
1613 struct f2fs_io_info fio = {
1614 .type = META,
1615 .rw = READ_SYNC | REQ_META | REQ_PRIO
1616 };
1617
1618 for (; page_idx < start + nrpages; page_idx++) {
1619 /* alloc temporal page for read node summary info*/
1620 page = alloc_page(GFP_F2FS_ZERO);
Gu Zheng12e374b2014-03-07 18:43:36 +08001621 if (!page)
1622 break;
Changman Leeb1a94e82013-11-15 10:42:51 +09001623
1624 lock_page(page);
1625 page->index = page_idx;
1626 list_add_tail(&page->lru, pages);
1627 }
1628
1629 list_for_each_entry(page, pages, lru)
1630 f2fs_submit_page_mbio(sbi, page, page->index, &fio);
1631
1632 f2fs_submit_merged_bio(sbi, META, READ);
Gu Zheng12e374b2014-03-07 18:43:36 +08001633
1634 return page_idx - start;
Changman Leeb1a94e82013-11-15 10:42:51 +09001635}
1636
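/*
 * Rebuild the node summary entries for a whole segment by reading every
 * node block in it; this slow path runs at mount time when the checkpoint
 * did not carry the node summaries (e.g. after a sudden power cut).
 */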
int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	struct page *page, *tmp;
	block_t addr;
	int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
	int i, last_offset, nrpages, err = 0;
	LIST_HEAD(page_list);

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; !err && i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = min(last_offset - i, bio_blocks);

		/* read ahead node pages */
		nrpages = ra_sum_pages(sbi, &page_list, addr, nrpages);
		if (!nrpages)
			return -ENOMEM;

		list_for_each_entry_safe(page, tmp, &page_list, lru) {
			if (err)
				goto skip;

			lock_page(page);
			if (unlikely(!PageUptodate(page))) {
				err = -EIO;
			} else {
				rn = F2FS_NODE(page);
				sum_entry->nid = rn->footer.nid;
				sum_entry->version = 0;
				sum_entry->ofs_in_node = 0;
				sum_entry++;
			}
			unlock_page(page);
skip:
			list_del(&page->lru);
			__free_pages(page, 0);
		}
	}
	return err;
}

static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);

	if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
		mutex_unlock(&curseg->curseg_mutex);
		return false;
	}

	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);
retry:
		write_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne) {
			__set_nat_cache_dirty(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
			continue;
		}
		ne = grab_nat_entry(nm_i, nid);
		if (!ne) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
		nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
		nat_set_version(ne, raw_ne.version);
		__set_nat_cache_dirty(nm_i, ne);
		write_unlock(&nm_i->nat_tree_lock);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
	return true;
}

/*
 * This function is called during the checkpointing process: it flushes
 * every dirty nat entry either into the current summary journal, if there
 * is room, or into the next copies of the NAT blocks.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct list_head *cur, *n;
	struct page *page = NULL;
	struct f2fs_nat_block *nat_blk = NULL;
	nid_t start_nid = 0, end_nid = 0;
	bool flushed;

	flushed = flush_nats_in_journal(sbi);

	if (!flushed)
		mutex_lock(&curseg->curseg_mutex);

	/* 1) flush dirty nat caches */
	list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
		struct nat_entry *ne;
		nid_t nid;
		struct f2fs_nat_entry raw_ne;
		int offset = -1;
		block_t new_blkaddr;

		ne = list_entry(cur, struct nat_entry, list);
		nid = nat_get_nid(ne);

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;
		if (flushed)
			goto to_nat_page;

		/* if there is room for nat entries in curseg->sumpage */
		offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
		if (offset >= 0) {
			raw_ne = nat_in_journal(sum, offset);
			goto flush_now;
		}
to_nat_page:
		if (!page || (start_nid > nid || nid > end_nid)) {
			if (page) {
				f2fs_put_page(page, 1);
				page = NULL;
			}
			start_nid = START_NID(nid);
			end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;

			/*
			 * get nat block with dirty flag, increased reference
			 * count, mapped and lock
			 */
			page = get_next_nat_page(sbi, start_nid);
			nat_blk = page_address(page);
		}

		f2fs_bug_on(!nat_blk);
		raw_ne = nat_blk->entries[nid - start_nid];
flush_now:
		new_blkaddr = nat_get_blkaddr(ne);

		raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
		raw_ne.block_addr = cpu_to_le32(new_blkaddr);
		raw_ne.version = nat_get_version(ne);

		if (offset < 0) {
			nat_blk->entries[nid - start_nid] = raw_ne;
		} else {
			nat_in_journal(sum, offset) = raw_ne;
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		}

		if (nat_get_blkaddr(ne) == NULL_ADDR &&
				add_free_nid(NM_I(sbi), nid, false) <= 0) {
			write_lock(&nm_i->nat_tree_lock);
			__del_from_nat_cache(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
		} else {
			write_lock(&nm_i->nat_tree_lock);
			__clear_nat_cache_dirty(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
		}
	}
	if (!flushed)
		mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(page, 1);

	/* 2) shrink nat caches if necessary */
	try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
}

static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segment, so divide by 2 */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);

	/* not used nids: 0, node, meta, (and root counted as valid node) */
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks - 3;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	INIT_LIST_HEAD(&nm_i->dirty_nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	rwlock_init(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	return 0;
}

int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}

void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		f2fs_bug_on(i->state == NID_ALLOC);
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
	}
	f2fs_bug_on(nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	write_lock(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
			nid, NATVEC_SIZE, natvec))) {
		unsigned idx;
		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++)
			__del_from_nat_cache(nm_i, natvec[idx]);
	}
	f2fs_bug_on(nm_i->nat_cnt);
	write_unlock(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}

int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		return -ENOMEM;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab) {
		kmem_cache_destroy(nat_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}