/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

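/*
 * Completion callback for read bios: walk the bio's pages, mark each one
 * uptodate (or error) according to the bio status, and unlock it so that
 * waiting readers can proceed.
 */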
static void f2fs_read_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (unlikely(!uptodate)) {
			ClearPageUptodate(page);
			SetPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
}

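/*
 * Completion callback for write bios: end writeback on each page and drop
 * the in-flight writeback count. On an I/O error, the filesystem is marked
 * read-only and the checkpoint carries an error flag. Also completes a
 * pending checkpoint waiter (sbi->wait_io) and wakes up cp_wait once no
 * writeback remains.
 */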
static void f2fs_write_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct f2fs_sb_info *sbi = bio->bi_private;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (unlikely(!uptodate)) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
			set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
			sbi->sb->s_flags |= MS_RDONLY;
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	} while (bvec >= bio->bi_io_vec);

	if (sbi->wait_io) {
		complete(sbi->wait_io);
		sbi->wait_io = NULL;
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = sbi;

	return bio;
}

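/*
 * Submit the bio currently being merged in @io, if any, and reset io->bio
 * so that a new merge sequence can start.
 */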
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;
	int rw;

	if (!io->bio)
		return;

	rw = fio->rw;

	if (is_read_io(rw)) {
		trace_f2fs_submit_read_bio(io->sbi->sb, rw,
						fio->type, io->bio);
		submit_bio(rw, io->bio);
	} else {
		trace_f2fs_submit_write_bio(io->sbi->sb, rw,
						fio->type, io->bio);
		/*
		 * META_FLUSH is issued only by the checkpoint procedure, and
		 * we must wait for this metadata bio to complete for FS
		 * consistency.
		 */
		if (fio->type == META_FLUSH) {
			DECLARE_COMPLETION_ONSTACK(wait);
			io->sbi->wait_io = &wait;
			submit_bio(rw, io->bio);
			wait_for_completion(&wait);
		} else {
			submit_bio(rw, io->bio);
		}
	}

	io->bio = NULL;
}

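/*
 * Flush the merged bio of the given page type. In the checkpoint path
 * (type >= META_FLUSH), the pending bio is promoted to a synchronous
 * META_FLUSH write with FLUSH/FUA semantics before submission.
 */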
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	mutex_lock(&io->io_mutex);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	mutex_unlock(&io->io_mutex);
}

/*
 * Fill the locked page with data located at the given block address.
 * Return the page unlocked.
 */
int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int rw)
{
	struct bio *bio;

	trace_f2fs_submit_page_bio(page, blk_addr, rw);

	/* Allocate a new bio */
	bio = __bio_alloc(sbi, blk_addr, 1, is_read_io(rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(rw, bio);
	return 0;
}

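/*
 * Add a page to the merged bio of its page type. The pending bio is
 * submitted early when the new block is not contiguous with the last merged
 * block or when the rw mode differs; otherwise pages keep accumulating so
 * that consecutive blocks go to disk in a single larger bio.
 */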
void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
			block_t blk_addr, struct f2fs_io_info *fio)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, blk_addr);

	mutex_lock(&io->io_mutex);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));

		io->bio = __bio_alloc(sbi, blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = blk_addr;

	mutex_unlock(&io->io_mutex);
	trace_f2fs_submit_page_mbio(page, fio->rw, fio->type, blk_addr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

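/*
 * Reserve one new data block for the dnode: charge the inode's valid block
 * count and record NEW_ADDR as the block address in the node page.
 */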
int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

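/*
 * Look up (or allocate) the dnode for @index and reserve a new block there
 * if none is allocated yet. The dnode is released unless the caller passed
 * in its own inode page.
 */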
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	/* if inode_page exists, index should be zero */
	f2fs_bug_on(!need_put && index);

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

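/*
 * Try to serve a block lookup from the per-inode extent cache. Returns 1
 * and maps bh_result when @pgofs falls inside the cached extent, or 0 on a
 * cache miss.
 */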
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return 0;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

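/*
 * Record a new block address in the node page and keep the single-extent
 * cache consistent: grow it by front/back merges when the new block is
 * contiguous, split it on an in-place update, and drop it entirely once it
 * becomes too fragmented (shorter than F2FS_MIN_EXTENT_LEN).
 */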
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	f2fs_bug_on(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext.ext_lock);
	if (need_update)
		sync_inode_page(dn);
	return;
}

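/*
 * Find the data page for @index in the page cache, reading it from disk if
 * necessary. With @sync, wait for the read to finish; otherwise the page is
 * read ahead (READA) and may be returned before it is uptodate.
 */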
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* A block preallocated by fallocate() has no cached page, but its
	 * block address remains NEW_ADDR. */
	if (unlikely(dn.data_blkaddr == NEW_ADDR))
		return ERR_PTR(-EINVAL);

	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (err)
		return ERR_PTR(err);

	if (sync) {
		wait_on_page_locked(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * The callers (functions in dir.c and GC) need to know whether this page
 * exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

repeat:
	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr remains NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err)
		return ERR_PTR(err);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		err = -ENOMEM;
		goto put_err;
	}

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
								READ_SYNC);
		if (err)
			goto put_err;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto put_err;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;

put_err:
	f2fs_put_dnode(&dn);
	return ERR_PTR(err);
}

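/*
 * Allocate an on-disk block for the dnode (used by the direct IO path):
 * charge the block count, pick a new block address in the warm data log,
 * and update the node page, bypassing the extent cache.
 */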
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct f2fs_summary sum;
	block_t new_blkaddr;
	struct node_info ni;
	int type;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;

	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	type = CURSEG_WARM_DATA;

	allocate_data_block(sbi, NULL, NULL_ADDR, &new_blkaddr, &sum, type);

	/* direct IO doesn't use the extent cache, to maximize performance */
	set_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);
	update_extent_cache(new_blkaddr, dn);
	clear_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);

	dn->data_blkaddr = new_blkaddr;
	return 0;
}

/*
 * get_data_block() now supports readahead/bmap/rw direct_IO with a mapped bh.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	bool allocated = false;

	/* Get the page offset from the block offset(iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result))
		goto out;

	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR)
		goto put_out;

	if (dn.data_blkaddr != NULL_ADDR) {
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else if (create) {
		err = __allocate_data_block(&dn);
		if (err)
			goto put_out;
		allocated = true;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else {
		goto put_out;
	}

	end_offset = IS_INODE(dn.node_page) ?
			ADDRS_PER_INODE(F2FS_I(inode)) : ADDRS_PER_BLOCK;
	bh_result->b_size = (((size_t)1) << blkbits);
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}
		if (dn.data_blkaddr == NEW_ADDR)
			goto put_out;

		end_offset = IS_INODE(dn.node_page) ?
			ADDRS_PER_INODE(F2FS_I(inode)) : ADDRS_PER_BLOCK;
	}

	if (maxblocks > (bh_result->b_size >> blkbits)) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			blkaddr = dn.data_blkaddr;
		}
		/* Give more consecutive addresses for the read ahead */
		if (blkaddr == (bh_result->b_blocknr + ofs)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			bh_result->b_size += (((size_t)1) << blkbits);
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(sbi);
out:
	trace_f2fs_get_data_block(inode, iblock, bh_result, err);
	return err;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret;

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	else
		ret = mpage_readpage(page, get_data_block);

	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return mpage_readpages(mapping, pages, nr_pages, get_data_block);
}

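/*
 * Write one data page to disk: reuse the existing block address via an
 * in-place write when SSR allocation prefers it, or allocate a new block
 * address and update the extent cache (out-of-place write).
 */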
int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
{
	struct inode *inode = page->mapping->host;
	block_t old_blkaddr, new_blkaddr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blkaddr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR, it is better to do in-place
	 * writes for the updated data.
	 */
	if (unlikely(old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(page, old_blkaddr, fio);
	} else {
		write_data_page(page, &dn, &new_blkaddr, fio);
		update_extent_cache(new_blkaddr, &dn);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

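/*
 * ->writepage implementation: zeroes the tail of a partial last page,
 * writes dentry blocks under checkpoint control, handles inline data, and
 * redirties the page when writeback is not currently possible (e.g. during
 * recovery, or when reclaim would run out of free sections).
 */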
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out of range of the file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(sbi->por_doing))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		err = do_write_data_page(page, &fio);
		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode) || f2fs_may_inline(inode))
		err = f2fs_write_inline_data(inode, page, offset);
	else
		err = do_write_data_page(page, &fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_dents(inode);
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

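/*
 * ->writepages implementation: skip small sets of dirty dentry pages while
 * memory allows, serialize regular-file writeback via sbi->writepages, and
 * flush the merged DATA bio after write_cache_pages().
 */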
static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	bool locked = false;
	int ret;
	long diff;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_dents(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_dents(inode);
	return 0;
}

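/*
 * ->write_begin implementation: convert inline data if the write outgrows
 * it, reserve a block for the target index, and make the page uptodate by
 * zeroing or by reading in the old contents for a partial-page write.
 */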
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	f2fs_balance_fs(sbi);
repeat:
	err = f2fs_convert_inline_data(inode, pos + len);
	if (err)
		return err;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	if (f2fs_has_inline_data(inode) && (pos + len) <= MAX_INLINE_DATA)
		goto inline_data;

	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	f2fs_unlock_op(sbi);

	if (err) {
		f2fs_put_page(page, 1);
		return err;
	}
inline_data:
	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		if (f2fs_has_inline_data(inode)) {
			err = f2fs_read_inline_data(inode, page);
			if (err) {
				page_cache_release(page);
				return err;
			}
		} else {
			err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
							READ_SYNC);
			if (err)
				return err;
		}

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			return -EIO;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	SetPageUptodate(page);
	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

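/*
 * Direct IO writes must be block-aligned: reject an unaligned offset or
 * iovec length so that the request falls back to buffered IO.
 */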
static int check_direct_IO(struct inode *inode, int rw,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
	int i;

	if (rw == READ)
		return 0;

	if (offset & blocksize_mask)
		return -EINVAL;

	for (i = 0; i < nr_segs; i++)
		if (iov[i].iov_len & blocksize_mask)
			return -EINVAL;
	return 0;
}

static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	/* Let buffered I/O handle the inline data case. */
	if (f2fs_has_inline_data(inode))
		return 0;

	if (check_direct_IO(inode, rw, iov, offset, nr_segs))
		return 0;

	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
							get_data_block);
}

static void f2fs_invalidate_data_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	if (PageDirty(page))
		inode_dec_dirty_dents(inode);
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

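/*
 * ->set_page_dirty implementation: mark the page uptodate and dirty, and
 * track dirty dentry pages of directory inodes for the checkpoint.
 */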
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);
	mark_inode_dirty(inode);

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	return generic_block_bmap(mapping, block, get_data_block);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};