/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

#define on_build_free_nids(nm_i) mutex_is_locked(&nm_i->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;

bool available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct sysinfo val;
	unsigned long mem_size = 0;
	bool res = false;

	si_meminfo(&val);
	/* give 25%, 25%, 50% of memory to each component respectively */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >> 12;
		res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> 12;
		res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == DIRTY_DENTS) {
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 1);
	}
	return res;
}

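/*
 * Illustrative arithmetic (added note, not from the original source): both
 * sides of each comparison above are in 4KB pages -- mem_size via the
 * ">> 12", val.totalram natively. Assuming 4KB pages, 4GB of RAM (1 << 20
 * pages) and ram_thresh = 10 (believed to be the default threshold), the
 * FREE_NIDS and NAT_ENTRIES caches may each grow to roughly
 * (1M * 10 / 100) >> 2 = ~26K pages (~100MB), while DIRTY_DENTS gets the
 * remaining half of the budget, ~52K pages (~200MB).
 */
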
static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);

	/* Dirty src_page means that it is already the new target NAT page. */
	if (PageDirty(src_page))
		return src_page;

	dst_page = grab_meta_page(sbi, dst_off);

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

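/*
 * Added note (an inference from current_nat_addr()/next_nat_addr() and
 * set_to_next_nat(), not a comment from the original source): each NAT
 * block appears to have two on-disk locations that alternate between
 * checkpoints. get_next_nat_page() copies the live copy into the shadow
 * location and flips the per-nid bit, so updates never clobber the copy
 * that the last checkpoint still references.
 */
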
static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	int is_cp = 1;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !e->checkpointed)
		is_cp = 0;
	read_unlock(&nm_i->nat_tree_lock);
	return is_cp;
}

bool fsync_mark_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool fsync_done = false;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e)
		fsync_done = e->fsync_done;
	read_unlock(&nm_i->nat_tree_lock);
	return fsync_done;
}

void fsync_mark_clear(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;

	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e)
		e->fsync_done = false;
	write_unlock(&nm_i->nat_tree_lock);
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	new->checkpointed = true;
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		node_info_from_raw_nat(&e->ni, ne);
	}
	write_unlock(&nm_i->nat_tree_lock);
}

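/*
 * Added note: grab_nat_entry() allocates with GFP_ATOMIC while the nat
 * tree write lock is held, so the allocation can fail transiently; the
 * retry loops above and in set_node_addr() below drop the lock and try
 * again rather than sleeping under it.
 */
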
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		e->ni = *ni;
		f2fs_bug_on(ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * When a nid is reallocated, the previous nat entry may
		 * remain in the nat cache, so reinitialize it with the
		 * new information.
		 */
		e->ni = *ni;
		f2fs_bug_on(ni->blk_addr != NULL_ADDR);
	}

	/* sanity check */
	f2fs_bug_on(nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment the version number as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	e = __lookup_nat_cache(nm_i, ni->ino);
	if (e)
		e->fsync_done = fsync_done;
	write_unlock(&nm_i->nat_tree_lock);
}

int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (available_free_memory(sbi, NAT_ENTRIES))
		return 0;

	write_lock(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	write_unlock(&nm_i->nat_tree_lock);
	return nr_shrink;
}

/*
 * This function always succeeds.
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	read_unlock(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}

/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(struct f2fs_inode_info *fi, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(fi);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}

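/*
 * Worked example (illustrative; assumes the common geometry where
 * ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018): for a file offset that lands
 * in the first single-indirect range, after the direct ranges have been
 * subtracted the function returns level = 2 with
 *
 *	offset[0] = NODE_IND1_BLOCK;		// slot in the inode
 *	offset[1] = block / ADDRS_PER_BLOCK;	// direct node under IND1
 *	offset[2] = block % ADDRS_PER_BLOCK;	// pointer inside that node
 *
 * while noffset[] carries the corresponding logical node offsets
 * (3 for IND1 itself, 4 + offset[1] for the direct node).
 */
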
/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is not set to RDONLY_NODE.
 * In the case of RDONLY_NODE, we don't need to care about the mutex.
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}
	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i], NULL);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}

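/*
 * Typical caller pattern (an illustrative sketch, not code from this file):
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (err)
 *		return err;
 *	// ... use dn.data_blkaddr and dn.ofs_in_node ...
 *	f2fs_put_dnode(&dn);
 */
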
static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		f2fs_bug_on(ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	f2fs_bug_on(ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			dn->node_page->index, dn->node_page->index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

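/*
 * Added note (a reading of the code above, not an original comment): the
 * positive return value counts logical node-offset slots consumed rather
 * than pages actually freed -- a hole (nid == 0) still accounts for
 * NIDS_PER_BLOCK + 1 slots, which is what lets the caller advance its
 * nofs cursor through the file's node-offset space (see
 * truncate_inode_blocks() below).
 */
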
static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* the reference count will be increased */
		pages[i] = get_node_page(sbi, nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(F2FS_I(inode), from, offset, noffset);
restart:
	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
				f2fs_put_page(page, 1);
				goto restart;
			}
			f2fs_wait_on_page_writeback(page, NODE);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

int truncate_xattr_node(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;

	if (!nid)
		return 0;

	npage = get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	F2FS_I(inode)->i_xattr_nid = 0;

	/* need to do checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));

	set_new_dnode(&dn, inode, page, npage, nid);

	if (page)
		dn.inode_page_locked = true;
	truncate_node(&dn);
	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
void remove_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	nid_t ino = inode->i_ino;
	struct dnode_of_data dn;

	page = get_node_page(sbi, ino);
	if (IS_ERR(page))
		return;

	if (truncate_xattr_node(inode, page)) {
		f2fs_put_page(page, 1);
		return;
	}
	/* 0 is possible, after f2fs_new_inode() has failed */
	f2fs_bug_on(inode->i_blocks != 0 && inode->i_blocks != 1);
	set_new_dnode(&dn, inode, page, page, ino);
	truncate_node(&dn);
}

struct page *new_inode_page(struct inode *inode, const struct qstr *name)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return new_node_page(&dn, 0, NULL);
}

struct page *new_node_page(struct dnode_of_data *dn,
				unsigned int ofs, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(NODE_MAPPING(sbi), dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
		err = -ENOSPC;
		goto fail;
	}

	get_node_info(sbi, dn->nid, &old_ni);

	/* Reinitialize old_ni with new node page */
	f2fs_bug_on(old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(dn->inode, page);
	SetPageUptodate(page);
	set_page_dirty(page);

	if (f2fs_has_xattr_block(ofs))
		F2FS_I(dn->inode)->i_xattr_nid = dn->nid;

	dn->node_page = page;
	if (ipage)
		update_inode(dn->inode, ipage);
	else
		sync_inode_page(dn);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * Caller should act as follows according to the return value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE: f2fs_put_page(page, 1)
 * error: nothing
 */
static int read_node_page(struct page *page, int rw)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	return f2fs_submit_page_bio(sbi, page, ni.blk_addr, rw);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	apage = find_get_page(NODE_MAPPING(sbi), nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	if (err == 0)
		f2fs_put_page(apage, 0);
	else if (err == LOCKED_PAGE)
		f2fs_put_page(apage, 1);
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	struct page *page;
	int err;
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto got_it;

	lock_page(page);
	if (unlikely(!PageUptodate(page) || nid != nid_of_node(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
got_it:
	mark_page_accessed(page);
	return page;
}

/*
 * Return a locked page for the desired node page.
 * And, readahead MAX_RA_NODE number of node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
	struct blk_plug plug;
	struct page *page;
	int err, i, end;
	nid_t nid;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto page_hit;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);

	lock_page(page);
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
page_hit:
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	mark_page_accessed(page);
	return page;
}

void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		update_inode_page(dn->inode);
	}
}

int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode, we should not skip writing
			 * node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				int mark = !is_checkpointed_node(sbi, ino);
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page, mark);
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}
			NODE_MAPPING(sbi)->a_ops->writepage(page, wbc);
			wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_merged_bio(sbi, NODE, WRITE);
	return nwritten;
}

int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index = 0, end = LONG_MAX;
	struct pagevec pvec;
	int ret2 = 0, ret = 0;

	pagevec_init(&pvec, 0);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_WRITEBACK,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (unlikely(page->index > end))
				continue;

			if (ino && ino_of_node(page) == ino) {
				f2fs_wait_on_page_writeback(page, NODE);
				if (TestClearPageError(page))
					ret = -EIO;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
		ret2 = -ENOSPC;
	if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags)))
		ret2 = -EIO;
	if (!ret)
		ret = ret2;
	return ret;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.type = NODE,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	trace_f2fs_writepage(page, NODE);

	if (unlikely(sbi->por_doing))
		goto redirty_out;

	f2fs_wait_on_page_writeback(page, NODE);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (wbc->for_reclaim)
		goto redirty_out;

	mutex_lock(&sbi->node_write);
	set_page_writeback(page);
	write_node_page(sbi, page, &fio, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	mutex_unlock(&sbi->node_write);
	unlock_page(page);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_node_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	diff = nr_pages_to_write(sbi, NODE, wbc);
	wbc->sync_mode = WB_SYNC_NONE;
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	trace_f2fs_set_page_dirty(page, NODE);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}

static void f2fs_invalidate_node_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (PageDirty(page))
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage = f2fs_write_node_page,
	.writepages = f2fs_write_node_pages,
	.set_page_dirty = f2fs_set_node_page_dirty,
	.invalidatepage = f2fs_invalidate_node_page,
	.releasepage = f2fs_release_node_page,
};

static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
								nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
						struct free_nid *i)
{
	list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}

static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	struct nat_entry *ne;
	bool allocated = false;

	if (!available_free_memory(sbi, FREE_NIDS))
		return -1;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return 0;

	if (build) {
		/* do not add allocated nids */
		read_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne &&
			(!ne->checkpointed || nat_get_blkaddr(ne) != NULL_ADDR))
			allocated = true;
		read_unlock(&nm_i->nat_tree_lock);
		if (allocated)
			return 0;
	}

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = NID_NEW;

	spin_lock(&nm_i->free_nid_list_lock);
	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		need_free = true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

static void scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int i;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {

		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		f2fs_bug_on(blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR) {
			if (add_free_nid(sbi, start_nid, true) < 0)
				break;
		}
	}
}

static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i = 0;
	nid_t nid = nm_i->next_scan_nid;

	/* Enough entries */
	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
		return;

	/* readahead nat pages to be scanned */
	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, META_NAT);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		scan_nat_page(sbi, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (i++ == FREE_NID_PAGES)
			break;
	}

	/* go to the next free nat pages to find free nids abundantly */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid can be used as an ino as well as a nid when an inode
 * is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
	if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
		return false;

	spin_lock(&nm_i->free_nid_list_lock);

	/* We should not use stale free nids created by build_free_nids */
	if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
		f2fs_bug_on(list_empty(&nm_i->free_nid_list));
		list_for_each_entry(i, &nm_i->free_nid_list, list)
			if (i->state == NID_NEW)
				break;

		f2fs_bug_on(i->state != NID_NEW);
		*nid = i->nid;
		i->state = NID_ALLOC;
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	/* Let's scan nat pages and its caches to get free nids */
	mutex_lock(&nm_i->build_lock);
	build_free_nids(sbi);
	mutex_unlock(&nm_i->build_lock);
	goto retry;
}

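/*
 * Lifecycle sketch (illustrative; mirrors the usage in get_dnode_of_data()
 * above): reserve a nid, try to attach a node page to it, then either
 * confirm or cancel the reservation.
 *
 *	nid_t nid;
 *
 *	if (!alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	page = new_node_page(dn, noffset, NULL);
 *	if (IS_ERR(page)) {
 *		alloc_nid_failed(sbi, nid);	// nid returns to the free list
 *		return PTR_ERR(page);
 *	}
 *	alloc_nid_done(sbi, nid);		// nid is now in use
 */
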
/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(!i || i->state != NID_ALLOC);
	__del_from_free_nid_list(nm_i, i);
	spin_unlock(&nm_i->free_nid_list_lock);

	kmem_cache_free(free_nid_slab, i);
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	if (!nid)
		return;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(!i || i->state != NID_ALLOC);
	if (!available_free_memory(sbi, FREE_NIDS)) {
		__del_from_free_nid_list(nm_i, i);
		need_free = true;
	} else {
		i->state = NID_NEW;
		nm_i->fcnt++;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
		struct f2fs_summary *sum, struct node_info *ni,
		block_t new_blkaddr)
{
	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
	set_node_addr(sbi, ni, new_blkaddr, false);
	clear_node_page_dirty(page);
}

static void recover_inline_xattr(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	if (!f2fs_has_inline_xattr(inode))
		return;

	if (!IS_INODE(page))
		return;

	ri = F2FS_INODE(page);
	if (!(ri->i_inline & F2FS_INLINE_XATTR))
		return;

	ipage = get_node_page(sbi, inode->i_ino);
	f2fs_bug_on(IS_ERR(ipage));

	dst_addr = inline_xattr_addr(ipage);
	src_addr = inline_xattr_addr(page);
	inline_size = inline_xattr_size(inode);

	f2fs_wait_on_page_writeback(ipage, NODE);
	memcpy(dst_addr, src_addr, inline_size);

	update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
}

bool recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid = nid_of_node(page);
	struct node_info ni;

	recover_inline_xattr(inode, page);

	if (!f2fs_has_xattr_block(ofs_of_node(page)))
		return false;

	/* 1: invalidate the previous xattr nid */
	if (!prev_xnid)
		goto recover_xnid;

	/* Deallocate node address */
	get_node_info(sbi, prev_xnid, &ni);
	f2fs_bug_on(ni.blk_addr == NULL_ADDR);
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: allocate new xattr nid */
	if (unlikely(!inc_valid_node_count(sbi, inode)))
		f2fs_bug_on(1);

	remove_free_nid(NM_I(sbi), new_xnid);
	get_node_info(sbi, new_xnid, &ni);
	ni.ino = inode->i_ino;
	set_node_addr(sbi, &ni, NEW_ADDR, false);
	F2FS_I(inode)->i_xattr_nid = new_xnid;

	/* 3: update xattr blkaddr */
	refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
	set_node_addr(sbi, &ni, blkaddr, false);

	update_inode_page(inode);
	return true;
}

int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	get_node_info(sbi, ino, &old_ni);

	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;

	ipage = grab_cache_page(NODE_MAPPING(sbi), ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from free nid list */
	remove_free_nid(NM_I(sbi), ino);

	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(!inc_valid_node_count(sbi, NULL)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	f2fs_put_page(ipage, 1);
	return 0;
}

Changman Leeb1a94e82013-11-15 10:42:51 +09001670/*
1671 * ra_sum_pages() merges contiguous pages into one bio and submits it.
Chao Yu53826b52014-05-27 08:41:07 +08001672 * These pre-read pages are allocated in bd_inode's mapping tree.
Changman Leeb1a94e82013-11-15 10:42:51 +09001673 */
Chao Yu53826b52014-05-27 08:41:07 +08001674static int ra_sum_pages(struct f2fs_sb_info *sbi, struct page **pages,
Changman Leeb1a94e82013-11-15 10:42:51 +09001675 int start, int nrpages)
1676{
Chao Yu53826b52014-05-27 08:41:07 +08001677 struct inode *inode = sbi->sb->s_bdev->bd_inode;
1678 struct address_space *mapping = inode->i_mapping;
1679 int i, page_idx = start;
Changman Leeb1a94e82013-11-15 10:42:51 +09001680 struct f2fs_io_info fio = {
1681 .type = META,
1682 .rw = READ_SYNC | REQ_META | REQ_PRIO
1683 };
1684
Chao Yu53826b52014-05-27 08:41:07 +08001685 for (i = 0; page_idx < start + nrpages; page_idx++, i++) {
1686 /* alloc page in bd_inode for reading node summary info */
1687 pages[i] = grab_cache_page(mapping, page_idx);
1688 if (!pages[i])
Gu Zheng12e374b2014-03-07 18:43:36 +08001689 break;
Chao Yu53826b52014-05-27 08:41:07 +08001690 f2fs_submit_page_mbio(sbi, pages[i], page_idx, &fio);
Changman Leeb1a94e82013-11-15 10:42:51 +09001691 }
1692
Changman Leeb1a94e82013-11-15 10:42:51 +09001693 f2fs_submit_merged_bio(sbi, META, READ);
Chao Yu53826b52014-05-27 08:41:07 +08001694 return i;
Changman Leeb1a94e82013-11-15 10:42:51 +09001695}
1696
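/*
 * Rebuild the summary entries of a whole node segment by reading every
 * node block in @segno through bd_inode's page cache and recording each
 * block's nid; the read pages are invalidated once they are consumed.
 */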
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001697int restore_node_summary(struct f2fs_sb_info *sbi,
1698 unsigned int segno, struct f2fs_summary_block *sum)
1699{
1700 struct f2fs_node *rn;
1701 struct f2fs_summary *sum_entry;
Chao Yu53826b52014-05-27 08:41:07 +08001702 struct inode *inode = sbi->sb->s_bdev->bd_inode;
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001703 block_t addr;
Changman Leeb1a94e82013-11-15 10:42:51 +09001704 int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
Chao Yu53826b52014-05-27 08:41:07 +08001705 struct page *pages[bio_blocks];
1706 int i, idx, last_offset, nrpages, err = 0;
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001707
1708 /* scan the node segment */
1709 last_offset = sbi->blocks_per_seg;
1710 addr = START_BLOCK(sbi, segno);
1711 sum_entry = &sum->entries[0];
1712
Gu Zheng12e374b2014-03-07 18:43:36 +08001713 for (i = 0; !err && i < last_offset; i += nrpages, addr += nrpages) {
Changman Leeb1a94e82013-11-15 10:42:51 +09001714 nrpages = min(last_offset - i, bio_blocks);
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001715
Changman Leeb1a94e82013-11-15 10:42:51 +09001716 /* read ahead node pages */
Chao Yu53826b52014-05-27 08:41:07 +08001717 nrpages = ra_sum_pages(sbi, pages, addr, nrpages);
Gu Zheng12e374b2014-03-07 18:43:36 +08001718 if (!nrpages)
1719 return -ENOMEM;
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001720
Chao Yu53826b52014-05-27 08:41:07 +08001721 for (idx = 0; idx < nrpages; idx++) {
Gu Zheng12e374b2014-03-07 18:43:36 +08001722 if (err)
1723 goto skip;
Changman Leeb1a94e82013-11-15 10:42:51 +09001724
Chao Yu53826b52014-05-27 08:41:07 +08001725 lock_page(pages[idx]);
1726 if (unlikely(!PageUptodate(pages[idx]))) {
Changman Leeb1a94e82013-11-15 10:42:51 +09001727 err = -EIO;
1728 } else {
Chao Yu53826b52014-05-27 08:41:07 +08001729 rn = F2FS_NODE(pages[idx]);
Changman Leeb1a94e82013-11-15 10:42:51 +09001730 sum_entry->nid = rn->footer.nid;
1731 sum_entry->version = 0;
1732 sum_entry->ofs_in_node = 0;
1733 sum_entry++;
1734 }
Chao Yu53826b52014-05-27 08:41:07 +08001735 unlock_page(pages[idx]);
Gu Zheng12e374b2014-03-07 18:43:36 +08001736skip:
Chao Yu53826b52014-05-27 08:41:07 +08001737 page_cache_release(pages[idx]);
Changman Leeb1a94e82013-11-15 10:42:51 +09001738 }
Chao Yu53826b52014-05-27 08:41:07 +08001739
1740 invalidate_mapping_pages(inode->i_mapping, addr,
1741 addr + nrpages);
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001742 }
Changman Leeb1a94e82013-11-15 10:42:51 +09001743 return err;
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001744}
1745
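/*
 * When the NAT journal in the current summary block is full, move all of
 * its entries into the dirty NAT cache so flush_nat_entries() writes them
 * to the NAT blocks instead. Returns false if the journal still has room.
 */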
1746static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
1747{
1748 struct f2fs_nm_info *nm_i = NM_I(sbi);
1749 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1750 struct f2fs_summary_block *sum = curseg->sum_blk;
1751 int i;
1752
1753 mutex_lock(&curseg->curseg_mutex);
1754
1755 if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
1756 mutex_unlock(&curseg->curseg_mutex);
1757 return false;
1758 }
1759
1760 for (i = 0; i < nats_in_cursum(sum); i++) {
1761 struct nat_entry *ne;
1762 struct f2fs_nat_entry raw_ne;
1763 nid_t nid = le32_to_cpu(nid_in_journal(sum, i));
1764
1765 raw_ne = nat_in_journal(sum, i);
1766retry:
1767 write_lock(&nm_i->nat_tree_lock);
1768 ne = __lookup_nat_cache(nm_i, nid);
1769 if (ne) {
1770 __set_nat_cache_dirty(nm_i, ne);
1771 write_unlock(&nm_i->nat_tree_lock);
1772 continue;
1773 }
1774 ne = grab_nat_entry(nm_i, nid);
1775 if (!ne) {
1776 write_unlock(&nm_i->nat_tree_lock);
1777 goto retry;
1778 }
Chao Yu17eac8c2014-04-17 10:51:05 +08001779 node_info_from_raw_nat(&ne->ni, &raw_ne);
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001780 __set_nat_cache_dirty(nm_i, ne);
1781 write_unlock(&nm_i->nat_tree_lock);
1782 }
1783 update_nats_in_cursum(sum, -i);
1784 mutex_unlock(&curseg->curseg_mutex);
1785 return true;
1786}
1787
1788/*
1789 * This function is called during the checkpointing process: it writes
 * dirty NAT entries either into the NAT journal in the current summary
 * block, when there is room, or back to their on-disk NAT blocks.
1790 */
1791void flush_nat_entries(struct f2fs_sb_info *sbi)
1792{
1793 struct f2fs_nm_info *nm_i = NM_I(sbi);
1794 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1795 struct f2fs_summary_block *sum = curseg->sum_blk;
Chao Yu48c561a2014-03-29 11:33:17 +08001796 struct nat_entry *ne, *cur;
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001797 struct page *page = NULL;
1798 struct f2fs_nat_block *nat_blk = NULL;
1799 nid_t start_nid = 0, end_nid = 0;
1800 bool flushed;
1801
1802 flushed = flush_nats_in_journal(sbi);
1803
1804 if (!flushed)
1805 mutex_lock(&curseg->curseg_mutex);
1806
1807 /* 1) flush dirty nat caches */
Chao Yu48c561a2014-03-29 11:33:17 +08001808 list_for_each_entry_safe(ne, cur, &nm_i->dirty_nat_entries, list) {
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001809 nid_t nid;
1810 struct f2fs_nat_entry raw_ne;
1811 int offset = -1;
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001812
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001813 if (nat_get_blkaddr(ne) == NEW_ADDR)
1814 continue;
Chao Yu48c561a2014-03-29 11:33:17 +08001815
1816 nid = nat_get_nid(ne);
1817
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001818 if (flushed)
1819 goto to_nat_page;
1820
1821		/* if there is room for nat entries in curseg's summary page */
1822 offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
1823 if (offset >= 0) {
1824 raw_ne = nat_in_journal(sum, offset);
1825 goto flush_now;
1826 }
1827to_nat_page:
1828 if (!page || (start_nid > nid || nid > end_nid)) {
1829 if (page) {
1830 f2fs_put_page(page, 1);
1831 page = NULL;
1832 }
1833 start_nid = START_NID(nid);
1834 end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;
1835
1836 /*
1837			 * get the nat block page with its dirty flag set,
1838			 * reference count increased, mapped and locked
1839 */
1840 page = get_next_nat_page(sbi, start_nid);
1841 nat_blk = page_address(page);
1842 }
1843
1844 f2fs_bug_on(!nat_blk);
1845 raw_ne = nat_blk->entries[nid - start_nid];
1846flush_now:
Chao Yu17eac8c2014-04-17 10:51:05 +08001847 raw_nat_from_node_info(&raw_ne, &ne->ni);
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001848
1849 if (offset < 0) {
1850 nat_blk->entries[nid - start_nid] = raw_ne;
1851 } else {
1852 nat_in_journal(sum, offset) = raw_ne;
1853 nid_in_journal(sum, offset) = cpu_to_le32(nid);
1854 }
1855
1856 if (nat_get_blkaddr(ne) == NULL_ADDR &&
Jaegeuk Kim250c7692014-04-16 10:47:06 +09001857 add_free_nid(sbi, nid, false) <= 0) {
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001858 write_lock(&nm_i->nat_tree_lock);
1859 __del_from_nat_cache(nm_i, ne);
1860 write_unlock(&nm_i->nat_tree_lock);
1861 } else {
1862 write_lock(&nm_i->nat_tree_lock);
1863 __clear_nat_cache_dirty(nm_i, ne);
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001864 write_unlock(&nm_i->nat_tree_lock);
1865 }
1866 }
1867 if (!flushed)
1868 mutex_unlock(&curseg->curseg_mutex);
1869 f2fs_put_page(page, 1);
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001870}
1871
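/*
 * Derive the NAT geometry from the raw superblock, initialize the free
 * nid and NAT radix trees, lists and locks, and duplicate the NAT
 * version bitmap kept in the checkpoint.
 */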
1872static int init_node_manager(struct f2fs_sb_info *sbi)
1873{
1874 struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
1875 struct f2fs_nm_info *nm_i = NM_I(sbi);
1876 unsigned char *version_bitmap;
1877 unsigned int nat_segs, nat_blocks;
1878
1879 nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
1880
1881	/* segment_count_nat includes pair segments, so divide by 2. */
1882 nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
1883 nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
Jaegeuk Kim8fa144b2014-02-17 12:44:20 +09001884
Jaegeuk Kim989f9142014-04-18 11:14:37 +09001885 nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
1886
Jaegeuk Kim8fa144b2014-02-17 12:44:20 +09001887	/* nids 0, node and meta are never used (root is counted as a valid node) */
Jaegeuk Kim989f9142014-04-18 11:14:37 +09001888 nm_i->available_nids = nm_i->max_nid - 3;
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001889 nm_i->fcnt = 0;
1890 nm_i->nat_cnt = 0;
Jaegeuk Kim327c57d2014-03-19 13:31:37 +09001891 nm_i->ram_thresh = DEF_RAM_THRESHOLD;
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001892
Jaegeuk Kim8409a8a2014-02-21 14:29:35 +09001893 INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001894 INIT_LIST_HEAD(&nm_i->free_nid_list);
1895 INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
1896 INIT_LIST_HEAD(&nm_i->nat_entries);
1897 INIT_LIST_HEAD(&nm_i->dirty_nat_entries);
1898
1899 mutex_init(&nm_i->build_lock);
1900 spin_lock_init(&nm_i->free_nid_list_lock);
1901 rwlock_init(&nm_i->nat_tree_lock);
1902
1903 nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
1904 nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
1905 version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
1906 if (!version_bitmap)
1907 return -EFAULT;
1908
1909 nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
1910 GFP_KERNEL);
1911 if (!nm_i->nat_bitmap)
1912 return -ENOMEM;
1913 return 0;
1914}
1915
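/* Allocate the node manager, initialize it and pre-build the free nid list. */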
1916int build_node_manager(struct f2fs_sb_info *sbi)
1917{
1918 int err;
1919
1920 sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
1921 if (!sbi->nm_info)
1922 return -ENOMEM;
1923
1924 err = init_node_manager(sbi);
1925 if (err)
1926 return err;
1927
1928 build_free_nids(sbi);
1929 return 0;
1930}
1931
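/*
 * Tear down the node manager: release every cached free nid and NAT
 * entry, then free the NAT bitmap and the nm_info structure itself.
 */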
1932void destroy_node_manager(struct f2fs_sb_info *sbi)
1933{
1934 struct f2fs_nm_info *nm_i = NM_I(sbi);
1935 struct free_nid *i, *next_i;
1936 struct nat_entry *natvec[NATVEC_SIZE];
1937 nid_t nid = 0;
1938 unsigned int found;
1939
1940 if (!nm_i)
1941 return;
1942
1943 /* destroy free nid list */
1944 spin_lock(&nm_i->free_nid_list_lock);
1945 list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
1946 f2fs_bug_on(i->state == NID_ALLOC);
Jaegeuk Kim8409a8a2014-02-21 14:29:35 +09001947 __del_from_free_nid_list(nm_i, i);
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001948 nm_i->fcnt--;
Chao Yu784b1352014-04-02 08:55:00 +08001949 spin_unlock(&nm_i->free_nid_list_lock);
1950 kmem_cache_free(free_nid_slab, i);
1951 spin_lock(&nm_i->free_nid_list_lock);
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001952 }
1953 f2fs_bug_on(nm_i->fcnt);
1954 spin_unlock(&nm_i->free_nid_list_lock);
1955
1956 /* destroy nat cache */
1957 write_lock(&nm_i->nat_tree_lock);
1958 while ((found = __gang_lookup_nat_cache(nm_i,
1959 nid, NATVEC_SIZE, natvec))) {
1960 unsigned idx;
Gu Zheng0c97ea92014-03-07 18:43:24 +08001961 nid = nat_get_nid(natvec[found - 1]) + 1;
1962 for (idx = 0; idx < found; idx++)
1963 __del_from_nat_cache(nm_i, natvec[idx]);
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001964 }
1965 f2fs_bug_on(nm_i->nat_cnt);
1966 write_unlock(&nm_i->nat_tree_lock);
1967
1968 kfree(nm_i->nat_bitmap);
1969 sbi->nm_info = NULL;
1970 kfree(nm_i);
1971}
1972
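/* Create the slab caches used for nat_entry and free_nid objects. */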
1973int __init create_node_manager_caches(void)
1974{
1975 nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
Gu Zhenge33dcea2014-03-07 18:43:28 +08001976 sizeof(struct nat_entry));
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001977 if (!nat_entry_slab)
1978 return -ENOMEM;
1979
1980 free_nid_slab = f2fs_kmem_cache_create("free_nid",
Gu Zhenge33dcea2014-03-07 18:43:28 +08001981 sizeof(struct free_nid));
Linus Torvalds8005ecc2012-12-20 13:54:51 -08001982 if (!free_nid_slab) {
1983 kmem_cache_destroy(nat_entry_slab);
1984 return -ENOMEM;
1985 }
1986 return 0;
1987}
1988
1989void destroy_node_manager_caches(void)
1990{
1991 kmem_cache_destroy(free_nid_slab);
1992 kmem_cache_destroy(nat_entry_slab);
1993}