/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

#define on_build_free_nids(nmi)	mutex_is_locked(&nm_i->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;

bool available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct sysinfo val;
	unsigned long mem_size = 0;
	bool res = false;

	si_meminfo(&val);
	/* give 25%, 25%, 50% of memory to each component respectively */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >> 12;
		res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> 12;
		res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == DIRTY_DENTS) {
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 1);
	}
	return res;
}

static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}

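/*
 * Copy the current NAT block of @nid into the block reserved for the next
 * checkpoint and return that dirty meta page (or the source page itself if
 * it is already dirty, i.e. already the up-to-date target).
 */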
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);

	/* Dirty src_page means that it is already the new target NAT page. */
	if (PageDirty(src_page))
		return src_page;

	dst_page = grab_meta_page(sbi, dst_off);

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	int is_cp = 1;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !e->checkpointed)
		is_cp = 0;
	read_unlock(&nm_i->nat_tree_lock);
	return is_cp;
}

bool fsync_mark_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool fsync_done = false;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e)
		fsync_done = e->fsync_done;
	read_unlock(&nm_i->nat_tree_lock);
	return fsync_done;
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	new->checkpointed = true;
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		node_info_from_raw_nat(&e->ni, ne);
	}
	write_unlock(&nm_i->nat_tree_lock);
}

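/*
 * Record @new_blkaddr for @ni in the NAT cache, mark the entry dirty for the
 * next checkpoint, and propagate @fsync_done to the owning inode's NAT entry
 * if that entry is still cached.
 */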
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		e->ni = *ni;
		f2fs_bug_on(ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry may still
		 * remain in the nat cache, so reinitialize it with the new
		 * information.
		 */
		e->ni = *ni;
		f2fs_bug_on(ni->blk_addr != NULL_ADDR);
	}

	/* sanity check */
	f2fs_bug_on(nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment the version number, as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	e = __lookup_nat_cache(nm_i, ni->ino);
	if (e)
		e->fsync_done = fsync_done;
	write_unlock(&nm_i->nat_tree_lock);
}

int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (available_free_memory(sbi, NAT_ENTRIES))
		return 0;

	write_lock(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	write_unlock(&nm_i->nat_tree_lock);
	return nr_shrink;
}

/*
 * This function always returns success.
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	read_unlock(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}

/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(struct f2fs_inode_info *fi, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(fi);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			offset[n - 2] * (dptrs_per_blk + 1) +
			offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if ro is not set to RDONLY_NODE.
 * In the case of RDONLY_NODE, we don't need to care about the mutex.
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}
	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i], NULL);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}

static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		f2fs_bug_on(ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	f2fs_bug_on(ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			dn->node_page->index, dn->node_page->index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

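/*
 * Recursively truncate the node blocks reachable from an indirect or double
 * indirect node, returning the number of node blocks freed or a negative
 * error code.
 */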
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

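/*
 * Truncate the direct nodes hanging off the partially truncated indirect
 * node described by offset[], then free that indirect node itself if it
 * becomes empty.
 */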
static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* reference count will be increased */
		pages[i] = get_node_page(sbi, nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(F2FS_I(inode), from, offset, noffset);
restart:
	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
				f2fs_put_page(page, 1);
				goto restart;
			}
			f2fs_wait_on_page_writeback(page, NODE);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

int truncate_xattr_node(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;

	if (!nid)
		return 0;

	npage = get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	F2FS_I(inode)->i_xattr_nid = 0;

	/* need to do checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));

	set_new_dnode(&dn, inode, page, npage, nid);

	if (page)
		dn.inode_page_locked = true;
	truncate_node(&dn);
	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
void remove_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	nid_t ino = inode->i_ino;
	struct dnode_of_data dn;

	page = get_node_page(sbi, ino);
	if (IS_ERR(page))
		return;

	if (truncate_xattr_node(inode, page)) {
		f2fs_put_page(page, 1);
		return;
	}
	/* 0 is possible, after f2fs_new_inode() fails */
	f2fs_bug_on(inode->i_blocks != 0 && inode->i_blocks != 1);
	set_new_dnode(&dn, inode, page, page, ino);
	truncate_node(&dn);
}

struct page *new_inode_page(struct inode *inode, const struct qstr *name)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return new_node_page(&dn, 0, NULL);
}

struct page *new_node_page(struct dnode_of_data *dn,
				unsigned int ofs, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(NODE_MAPPING(sbi), dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
		err = -ENOSPC;
		goto fail;
	}

	get_node_info(sbi, dn->nid, &old_ni);

	/* Reinitialize old_ni with new node page */
	f2fs_bug_on(old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(dn->inode, page);
	SetPageUptodate(page);
	set_page_dirty(page);

	if (f2fs_has_xattr_block(ofs))
		F2FS_I(dn->inode)->i_xattr_nid = dn->nid;

	dn->node_page = page;
	if (ipage)
		update_inode(dn->inode, ipage);
	else
		sync_inode_page(dn);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * Depending on the return value, the caller should do the following:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE: f2fs_put_page(page, 1)
 * error: nothing
 */
static int read_node_page(struct page *page, int rw)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	return f2fs_submit_page_bio(sbi, page, ni.blk_addr, rw);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	apage = find_get_page(NODE_MAPPING(sbi), nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	if (err == 0)
		f2fs_put_page(apage, 0);
	else if (err == LOCKED_PAGE)
		f2fs_put_page(apage, 1);
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	struct page *page;
	int err;
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto got_it;

	lock_page(page);
	if (unlikely(!PageUptodate(page) || nid != nid_of_node(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
got_it:
	mark_page_accessed(page);
	return page;
}

/*
 * Return a locked page for the desired node page.
 * And, readahead MAX_RA_NODE number of node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
	struct blk_plug plug;
	struct page *page;
	int err, i, end;
	nid_t nid;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto page_hit;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);

	lock_page(page);
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
page_hit:
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	mark_page_accessed(page);
	return page;
}

void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		update_inode_page(dn->inode);
	}
}

int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode, we should not skip writing
			 * node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				int mark = !is_checkpointed_node(sbi, ino);
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page, mark);
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}
			NODE_MAPPING(sbi)->a_ops->writepage(page, wbc);
			wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_merged_bio(sbi, NODE, WRITE);
	return nwritten;
}

int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index = 0, end = LONG_MAX;
	struct pagevec pvec;
	int ret2 = 0, ret = 0;

	pagevec_init(&pvec, 0);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_WRITEBACK,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (unlikely(page->index > end))
				continue;

			if (ino && ino_of_node(page) == ino) {
				f2fs_wait_on_page_writeback(page, NODE);
				if (TestClearPageError(page))
					ret = -EIO;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
		ret2 = -ENOSPC;
	if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags)))
		ret2 = -EIO;
	if (!ret)
		ret = ret2;
	return ret;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.type = NODE,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	trace_f2fs_writepage(page, NODE);

	if (unlikely(sbi->por_doing))
		goto redirty_out;

	f2fs_wait_on_page_writeback(page, NODE);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (wbc->for_reclaim)
		goto redirty_out;

	mutex_lock(&sbi->node_write);
	set_page_writeback(page);
	write_node_page(sbi, page, &fio, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	mutex_unlock(&sbi->node_write);
	unlock_page(page);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_node_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	diff = nr_pages_to_write(sbi, NODE, wbc);
	wbc->sync_mode = WB_SYNC_NONE;
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	trace_f2fs_set_page_dirty(page, NODE);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}

static void f2fs_invalidate_node_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (PageDirty(page))
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_node_page,
	.releasepage	= f2fs_release_node_page,
};

static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
						nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
						struct free_nid *i)
{
	list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}

static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	struct nat_entry *ne;
	bool allocated = false;

	if (!available_free_memory(sbi, FREE_NIDS))
		return -1;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return 0;

	if (build) {
		/* do not add allocated nids */
		read_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne &&
			(!ne->checkpointed || nat_get_blkaddr(ne) != NULL_ADDR))
			allocated = true;
		read_unlock(&nm_i->nat_tree_lock);
		if (allocated)
			return 0;
	}

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = NID_NEW;

	spin_lock(&nm_i->free_nid_list_lock);
	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		need_free = true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

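/*
 * Scan one NAT block page and add every nid whose block address is
 * NULL_ADDR (i.e. currently unused) to the free nid list.
 */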
static void scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int i;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {

		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		f2fs_bug_on(blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR) {
			if (add_free_nid(sbi, start_nid, true) < 0)
				break;
		}
	}
}

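/*
 * Refill the free nid list by scanning NAT pages from next_scan_nid and the
 * NAT entries journaled in the current hot-data summary block.
 */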
static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i = 0;
	nid_t nid = nm_i->next_scan_nid;

	/* Enough entries */
	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
		return;

	/* readahead nat pages to be scanned */
	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, META_NAT);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		scan_nat_page(sbi, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (i++ == FREE_NID_PAGES)
			break;
	}

	/* go to the next free nat pages to find free nids abundantly */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid could be used as ino as well as nid when an inode is
 * created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
	if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
		return false;

	spin_lock(&nm_i->free_nid_list_lock);

	/* We should not use stale free nids created by build_free_nids */
	if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
		f2fs_bug_on(list_empty(&nm_i->free_nid_list));
		list_for_each_entry(i, &nm_i->free_nid_list, list)
			if (i->state == NID_NEW)
				break;

		f2fs_bug_on(i->state != NID_NEW);
		*nid = i->nid;
		i->state = NID_ALLOC;
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	/* Let's scan nat pages and its caches to get free nids */
	mutex_lock(&nm_i->build_lock);
	build_free_nids(sbi);
	mutex_unlock(&nm_i->build_lock);
	goto retry;
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(!i || i->state != NID_ALLOC);
	__del_from_free_nid_list(nm_i, i);
	spin_unlock(&nm_i->free_nid_list_lock);

	kmem_cache_free(free_nid_slab, i);
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	if (!nid)
		return;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(!i || i->state != NID_ALLOC);
	if (!available_free_memory(sbi, FREE_NIDS)) {
		__del_from_free_nid_list(nm_i, i);
		need_free = true;
	} else {
		i->state = NID_NEW;
		nm_i->fcnt++;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
		struct f2fs_summary *sum, struct node_info *ni,
		block_t new_blkaddr)
{
	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
	set_node_addr(sbi, ni, new_blkaddr, false);
	clear_node_page_dirty(page);
}

static void recover_inline_xattr(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	if (!f2fs_has_inline_xattr(inode))
		return;

	if (!IS_INODE(page))
		return;

	ri = F2FS_INODE(page);
	if (!(ri->i_inline & F2FS_INLINE_XATTR))
		return;

	ipage = get_node_page(sbi, inode->i_ino);
	f2fs_bug_on(IS_ERR(ipage));

	dst_addr = inline_xattr_addr(ipage);
	src_addr = inline_xattr_addr(page);
	inline_size = inline_xattr_size(inode);

	f2fs_wait_on_page_writeback(ipage, NODE);
	memcpy(dst_addr, src_addr, inline_size);

	update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
}

bool recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid = nid_of_node(page);
	struct node_info ni;

	recover_inline_xattr(inode, page);

	if (!f2fs_has_xattr_block(ofs_of_node(page)))
		return false;

	/* 1: invalidate the previous xattr nid */
	if (!prev_xnid)
		goto recover_xnid;

	/* Deallocate node address */
	get_node_info(sbi, prev_xnid, &ni);
	f2fs_bug_on(ni.blk_addr == NULL_ADDR);
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: allocate new xattr nid */
	if (unlikely(!inc_valid_node_count(sbi, inode)))
		f2fs_bug_on(1);

	remove_free_nid(NM_I(sbi), new_xnid);
	get_node_info(sbi, new_xnid, &ni);
	ni.ino = inode->i_ino;
	set_node_addr(sbi, &ni, NEW_ADDR, false);
	F2FS_I(inode)->i_xattr_nid = new_xnid;

	/* 3: update xattr blkaddr */
	refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
	set_node_addr(sbi, &ni, blkaddr, false);

	update_inode_page(inode);
	return true;
}

int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	get_node_info(sbi, ino, &old_ni);

	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;

	ipage = grab_cache_page(NODE_MAPPING(sbi), ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from free nid list */
	remove_free_nid(NM_I(sbi), ino);

	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(!inc_valid_node_count(sbi, NULL)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	f2fs_put_page(ipage, 1);
	return 0;
}

/*
 * ra_sum_pages() merges contiguous pages into one bio and submits them.
 * These pre-read pages are allocated in bd_inode's mapping tree.
 */
static int ra_sum_pages(struct f2fs_sb_info *sbi, struct page **pages,
				int start, int nrpages)
{
	struct inode *inode = sbi->sb->s_bdev->bd_inode;
	struct address_space *mapping = inode->i_mapping;
	int i, page_idx = start;
	struct f2fs_io_info fio = {
		.type = META,
		.rw = READ_SYNC | REQ_META | REQ_PRIO
	};

	for (i = 0; page_idx < start + nrpages; page_idx++, i++) {
		/* alloc page in bd_inode for reading node summary info */
		pages[i] = grab_cache_page(mapping, page_idx);
		if (!pages[i])
			break;
		f2fs_submit_page_mbio(sbi, pages[i], page_idx, &fio);
	}

	f2fs_submit_merged_bio(sbi, META, READ);
	return i;
}
1684
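/*
 * restore_node_summary - rebuild the summary entries of a node segment by
 * reading its node blocks back from disk in bio-sized batches via
 * ra_sum_pages() and copying each node footer's nid into the corresponding
 * summary entry.
 */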
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001685int restore_node_summary(struct f2fs_sb_info *sbi,
1686 unsigned int segno, struct f2fs_summary_block *sum)
1687{
1688 struct f2fs_node *rn;
1689 struct f2fs_summary *sum_entry;
Chao Yu53826b52014-05-27 08:41:07 +08001690 struct inode *inode = sbi->sb->s_bdev->bd_inode;
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001691 block_t addr;
Changman Leeb1a94e82013-11-15 10:42:51 +09001692 int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
Chao Yu53826b52014-05-27 08:41:07 +08001693 struct page *pages[bio_blocks];
1694 int i, idx, last_offset, nrpages, err = 0;
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001695
1696 /* scan the node segment */
1697 last_offset = sbi->blocks_per_seg;
1698 addr = START_BLOCK(sbi, segno);
1699 sum_entry = &sum->entries[0];
1700
Gu Zheng12e374b2014-03-07 18:43:36 +08001701 for (i = 0; !err && i < last_offset; i += nrpages, addr += nrpages) {
Changman Leeb1a94e82013-11-15 10:42:51 +09001702 nrpages = min(last_offset - i, bio_blocks);
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001703
Changman Leeb1a94e82013-11-15 10:42:51 +09001704 /* read ahead node pages */
Chao Yu53826b52014-05-27 08:41:07 +08001705 nrpages = ra_sum_pages(sbi, pages, addr, nrpages);
Gu Zheng12e374b2014-03-07 18:43:36 +08001706 if (!nrpages)
1707 return -ENOMEM;
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001708
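		/* copy the nid from each read-ahead node footer into the summary */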
Chao Yu53826b52014-05-27 08:41:07 +08001709 for (idx = 0; idx < nrpages; idx++) {
Gu Zheng12e374b2014-03-07 18:43:36 +08001710 if (err)
1711 goto skip;
Changman Leeb1a94e82013-11-15 10:42:51 +09001712
Chao Yu53826b52014-05-27 08:41:07 +08001713 lock_page(pages[idx]);
1714 if (unlikely(!PageUptodate(pages[idx]))) {
Changman Leeb1a94e82013-11-15 10:42:51 +09001715 err = -EIO;
1716 } else {
Chao Yu53826b52014-05-27 08:41:07 +08001717 rn = F2FS_NODE(pages[idx]);
Changman Leeb1a94e82013-11-15 10:42:51 +09001718 sum_entry->nid = rn->footer.nid;
1719 sum_entry->version = 0;
1720 sum_entry->ofs_in_node = 0;
1721 sum_entry++;
1722 }
Chao Yu53826b52014-05-27 08:41:07 +08001723 unlock_page(pages[idx]);
Gu Zheng12e374b2014-03-07 18:43:36 +08001724skip:
Chao Yu53826b52014-05-27 08:41:07 +08001725 page_cache_release(pages[idx]);
Changman Leeb1a94e82013-11-15 10:42:51 +09001726 }
Chao Yu53826b52014-05-27 08:41:07 +08001727
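		/* drop the consumed read-ahead pages from bd_inode's mapping */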
1728 invalidate_mapping_pages(inode->i_mapping, addr,
1729 addr + nrpages);
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001730 }
Changman Leeb1a94e82013-11-15 10:42:51 +09001731 return err;
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001732}
1733
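/*
 * flush_nats_in_journal - when the NAT journal in the current summary block
 * is full, move every journalled entry into the in-memory NAT cache and mark
 * it dirty so that flush_nat_entries() writes it into a NAT block instead.
 * Returns true if the journal was drained, false if there was still room.
 */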
1734static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
1735{
1736 struct f2fs_nm_info *nm_i = NM_I(sbi);
1737 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1738 struct f2fs_summary_block *sum = curseg->sum_blk;
1739 int i;
1740
1741 mutex_lock(&curseg->curseg_mutex);
1742
1743 if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
1744 mutex_unlock(&curseg->curseg_mutex);
1745 return false;
1746 }
1747
1748 for (i = 0; i < nats_in_cursum(sum); i++) {
1749 struct nat_entry *ne;
1750 struct f2fs_nat_entry raw_ne;
1751 nid_t nid = le32_to_cpu(nid_in_journal(sum, i));
1752
1753 raw_ne = nat_in_journal(sum, i);
1754retry:
1755 write_lock(&nm_i->nat_tree_lock);
1756 ne = __lookup_nat_cache(nm_i, nid);
1757 if (ne) {
1758 __set_nat_cache_dirty(nm_i, ne);
1759 write_unlock(&nm_i->nat_tree_lock);
1760 continue;
1761 }
1762 ne = grab_nat_entry(nm_i, nid);
1763 if (!ne) {
1764 write_unlock(&nm_i->nat_tree_lock);
1765 goto retry;
1766 }
Chao Yu17eac8c2014-04-17 10:51:05 +08001767 node_info_from_raw_nat(&ne->ni, &raw_ne);
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001768 __set_nat_cache_dirty(nm_i, ne);
1769 write_unlock(&nm_i->nat_tree_lock);
1770 }
1771 update_nats_in_cursum(sum, -i);
1772 mutex_unlock(&curseg->curseg_mutex);
1773 return true;
1774}
1775
1776/*
 1777 * Flush all dirty NAT cache entries back to the NAT area or the curseg journal; called during the checkpointing process.
1778 */
1779void flush_nat_entries(struct f2fs_sb_info *sbi)
1780{
1781 struct f2fs_nm_info *nm_i = NM_I(sbi);
1782 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1783 struct f2fs_summary_block *sum = curseg->sum_blk;
Chao Yu48c561a2014-03-29 11:33:17 +08001784 struct nat_entry *ne, *cur;
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001785 struct page *page = NULL;
1786 struct f2fs_nat_block *nat_blk = NULL;
1787 nid_t start_nid = 0, end_nid = 0;
1788 bool flushed;
1789
1790 flushed = flush_nats_in_journal(sbi);
1791
1792 if (!flushed)
1793 mutex_lock(&curseg->curseg_mutex);
1794
1795 /* 1) flush dirty nat caches */
Chao Yu48c561a2014-03-29 11:33:17 +08001796 list_for_each_entry_safe(ne, cur, &nm_i->dirty_nat_entries, list) {
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001797 nid_t nid;
1798 struct f2fs_nat_entry raw_ne;
1799 int offset = -1;
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001800
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001801 if (nat_get_blkaddr(ne) == NEW_ADDR)
1802 continue;
Chao Yu48c561a2014-03-29 11:33:17 +08001803
1804 nid = nat_get_nid(ne);
1805
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001806 if (flushed)
1807 goto to_nat_page;
1808
 1809		/* if there is room for nat entries in the curseg summary block */
1810 offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
1811 if (offset >= 0) {
1812 raw_ne = nat_in_journal(sum, offset);
1813 goto flush_now;
1814 }
1815to_nat_page:
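		/*
		 * Crossing a NAT block boundary: release the previously loaded
		 * block page and load the one covering @nid.  Each NAT block
		 * covers NAT_ENTRY_PER_BLOCK consecutive nids starting at
		 * START_NID(nid).
		 */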
1816 if (!page || (start_nid > nid || nid > end_nid)) {
1817 if (page) {
1818 f2fs_put_page(page, 1);
1819 page = NULL;
1820 }
1821 start_nid = START_NID(nid);
1822 end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;
1823
1824 /*
 1825			 * get the NAT block page with the dirty flag set, its reference
 1826			 * count increased, and the page mapped and locked
1827 */
1828 page = get_next_nat_page(sbi, start_nid);
1829 nat_blk = page_address(page);
1830 }
1831
1832 f2fs_bug_on(!nat_blk);
1833 raw_ne = nat_blk->entries[nid - start_nid];
1834flush_now:
Chao Yu17eac8c2014-04-17 10:51:05 +08001835 raw_nat_from_node_info(&raw_ne, &ne->ni);
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001836
1837 if (offset < 0) {
1838 nat_blk->entries[nid - start_nid] = raw_ne;
1839 } else {
1840 nat_in_journal(sum, offset) = raw_ne;
1841 nid_in_journal(sum, offset) = cpu_to_le32(nid);
1842 }
1843
1844 if (nat_get_blkaddr(ne) == NULL_ADDR &&
Jaegeuk Kim250c7692014-04-16 10:47:06 +09001845 add_free_nid(sbi, nid, false) <= 0) {
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001846 write_lock(&nm_i->nat_tree_lock);
1847 __del_from_nat_cache(nm_i, ne);
1848 write_unlock(&nm_i->nat_tree_lock);
1849 } else {
1850 write_lock(&nm_i->nat_tree_lock);
1851 __clear_nat_cache_dirty(nm_i, ne);
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001852 write_unlock(&nm_i->nat_tree_lock);
1853 }
1854 }
1855 if (!flushed)
1856 mutex_unlock(&curseg->curseg_mutex);
1857 f2fs_put_page(page, 1);
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001858}
1859
1860static int init_node_manager(struct f2fs_sb_info *sbi)
1861{
1862 struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
1863 struct f2fs_nm_info *nm_i = NM_I(sbi);
1864 unsigned char *version_bitmap;
1865 unsigned int nat_segs, nat_blocks;
1866
1867 nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
1868
 1869	/* segment_count_nat includes the NAT pair segments, so divide by 2 */
1870 nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
1871 nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
Jaegeuk Kim8fa144b2014-02-17 12:44:20 +09001872
Jaegeuk Kim989f9142014-04-18 11:14:37 +09001873 nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
1874
Jaegeuk Kim8fa144b2014-02-17 12:44:20 +09001875	/* nids 0, node and meta are never used (root is counted as a valid node) */
Jaegeuk Kim989f9142014-04-18 11:14:37 +09001876 nm_i->available_nids = nm_i->max_nid - 3;
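	/*
	 * Worked example (a sketch, not a requirement of the format): with
	 * 4KB blocks a NAT block holds NAT_ENTRY_PER_BLOCK == 455 entries, so
	 * an image formatted with segment_count_nat == 2 (one usable segment
	 * after pairing) and 512 blocks per segment gets nat_blocks == 512 and
	 * max_nid == 455 * 512 == 232960, of which 3 nids stay reserved.
	 */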
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001877 nm_i->fcnt = 0;
1878 nm_i->nat_cnt = 0;
Jaegeuk Kim327c57d2014-03-19 13:31:37 +09001879 nm_i->ram_thresh = DEF_RAM_THRESHOLD;
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001880
Jaegeuk Kim8409a8a2014-02-21 14:29:35 +09001881 INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001882 INIT_LIST_HEAD(&nm_i->free_nid_list);
1883 INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
1884 INIT_LIST_HEAD(&nm_i->nat_entries);
1885 INIT_LIST_HEAD(&nm_i->dirty_nat_entries);
1886
1887 mutex_init(&nm_i->build_lock);
1888 spin_lock_init(&nm_i->free_nid_list_lock);
1889 rwlock_init(&nm_i->nat_tree_lock);
1890
1891 nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
1892 nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
1893 version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
1894 if (!version_bitmap)
1895 return -EFAULT;
1896
1897 nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
1898 GFP_KERNEL);
1899 if (!nm_i->nat_bitmap)
1900 return -ENOMEM;
1901 return 0;
1902}
1903
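/*
 * build_node_manager - allocate and initialize the node manager at mount
 * time, then pre-build the free nid list so node allocation can start
 * immediately.
 */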
1904int build_node_manager(struct f2fs_sb_info *sbi)
1905{
1906 int err;
1907
1908 sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
1909 if (!sbi->nm_info)
1910 return -ENOMEM;
1911
1912 err = init_node_manager(sbi);
1913 if (err)
1914 return err;
1915
1916 build_free_nids(sbi);
1917 return 0;
1918}
1919
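/*
 * destroy_node_manager - tear down the node manager at umount time: release
 * every cached free nid and NAT entry, then free the copied NAT bitmap.
 */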
1920void destroy_node_manager(struct f2fs_sb_info *sbi)
1921{
1922 struct f2fs_nm_info *nm_i = NM_I(sbi);
1923 struct free_nid *i, *next_i;
1924 struct nat_entry *natvec[NATVEC_SIZE];
1925 nid_t nid = 0;
1926 unsigned int found;
1927
1928 if (!nm_i)
1929 return;
1930
1931 /* destroy free nid list */
1932 spin_lock(&nm_i->free_nid_list_lock);
1933 list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
1934 f2fs_bug_on(i->state == NID_ALLOC);
Jaegeuk Kim8409a8a2014-02-21 14:29:35 +09001935 __del_from_free_nid_list(nm_i, i);
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001936 nm_i->fcnt--;
Chao Yu784b1352014-04-02 08:55:00 +08001937 spin_unlock(&nm_i->free_nid_list_lock);
1938 kmem_cache_free(free_nid_slab, i);
1939 spin_lock(&nm_i->free_nid_list_lock);
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001940 }
1941 f2fs_bug_on(nm_i->fcnt);
1942 spin_unlock(&nm_i->free_nid_list_lock);
1943
1944 /* destroy nat cache */
1945 write_lock(&nm_i->nat_tree_lock);
1946 while ((found = __gang_lookup_nat_cache(nm_i,
1947 nid, NATVEC_SIZE, natvec))) {
1948 unsigned idx;
Gu Zheng0c97ea92014-03-07 18:43:24 +08001949 nid = nat_get_nid(natvec[found - 1]) + 1;
1950 for (idx = 0; idx < found; idx++)
1951 __del_from_nat_cache(nm_i, natvec[idx]);
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001952 }
1953 f2fs_bug_on(nm_i->nat_cnt);
1954 write_unlock(&nm_i->nat_tree_lock);
1955
1956 kfree(nm_i->nat_bitmap);
1957 sbi->nm_info = NULL;
1958 kfree(nm_i);
1959}
1960
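/*
 * The two slab caches below back the NAT cache and the free nid list; they
 * are created once at module init and destroyed at module exit.
 */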
1961int __init create_node_manager_caches(void)
1962{
1963 nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
Gu Zhenge33dcea2014-03-07 18:43:28 +08001964 sizeof(struct nat_entry));
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001965 if (!nat_entry_slab)
1966 return -ENOMEM;
1967
1968 free_nid_slab = f2fs_kmem_cache_create("free_nid",
Gu Zhenge33dcea2014-03-07 18:43:28 +08001969 sizeof(struct free_nid));
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001970 if (!free_nid_slab) {
1971 kmem_cache_destroy(nat_entry_slab);
1972 return -ENOMEM;
1973 }
1974 return 0;
1975}
1976
1977void destroy_node_manager_caches(void)
1978{
1979 kmem_cache_destroy(free_nid_slab);
1980 kmem_cache_destroy(nat_entry_slab);
1981}