/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

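/*
 * Read completion handler: walk the bio_vec array backwards, marking each
 * page uptodate (or failed) and unlocking it.
 */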
static void f2fs_read_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (unlikely(!uptodate)) {
			ClearPageUptodate(page);
			SetPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
}

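/*
 * Write completion handler: end writeback on every page in the bio and,
 * on error, mark the checkpoint broken and turn the filesystem read-only.
 * Completes a pending META_FLUSH waiter and wakes up the checkpoint
 * procedure once no writeback pages remain.
 */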
static void f2fs_write_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct f2fs_sb_info *sbi = bio->bi_private;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (unlikely(!uptodate)) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
			set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
			sbi->sb->s_flags |= MS_RDONLY;
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	} while (bvec >= bio->bi_io_vec);

	if (sbi->wait_io) {
		complete(sbi->wait_io);
		sbi->wait_io = NULL;
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = sbi;

	return bio;
}

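/*
 * Submit the bio merged so far, if any. A META_FLUSH bio is issued only by
 * the checkpoint procedure, so wait for its completion before returning.
 */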
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;
	int rw;

	if (!io->bio)
		return;

	rw = fio->rw;

	if (is_read_io(rw)) {
		trace_f2fs_submit_read_bio(io->sbi->sb, rw,
						fio->type, io->bio);
		submit_bio(rw, io->bio);
	} else {
		trace_f2fs_submit_write_bio(io->sbi->sb, rw,
						fio->type, io->bio);
		/*
		 * META_FLUSH is only issued by the checkpoint procedure, and
		 * we should wait for this metadata bio for FS consistency.
		 */
		if (fio->type == META_FLUSH) {
			DECLARE_COMPLETION_ONSTACK(wait);
			io->sbi->wait_io = &wait;
			submit_bio(rw, io->bio);
			wait_for_completion(&wait);
		} else {
			submit_bio(rw, io->bio);
		}
	}

	io->bio = NULL;
}

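/*
 * Flush the pending merged bio for the given page type. In the checkpoint
 * procedure, META is upgraded to META_FLUSH so that the data reaches the
 * disk with flush/FUA semantics.
 */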
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	mutex_lock(&io->io_mutex);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	mutex_unlock(&io->io_mutex);
}

/*
 * Fill the locked page with data located at the given block address.
 * Return an unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int rw)
{
	struct bio *bio;

	trace_f2fs_submit_page_bio(page, blk_addr, rw);

	/* Allocate a new bio */
	bio = __bio_alloc(sbi, blk_addr, 1, is_read_io(rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(rw, bio);
	return 0;
}

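/*
 * Add a page to the per-type merged bio. If the block is not contiguous
 * with the pending bio, or the I/O mode differs, the old bio is submitted
 * first and a new one is started.
 */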
void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
			block_t blk_addr, struct f2fs_io_info *fio)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, blk_addr);

	mutex_lock(&io->io_mutex);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));

		io->bio = __bio_alloc(sbi, blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = blk_addr;

	mutex_unlock(&io->io_mutex);
	trace_f2fs_submit_page_mbio(page, fio->rw, fio->type, blk_addr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

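/*
 * Reserve one data block for the dnode: charge the valid block count and
 * record NEW_ADDR (allocated but not yet written) in the node page.
 */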
int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

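/*
 * Look up the dnode covering @index and reserve a new block there if none
 * is allocated yet. The dnode is released unless the caller passed in its
 * own inode page.
 */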
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	/* if inode_page exists, index should be zero */
	f2fs_bug_on(!need_put && index);

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

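/*
 * Look up @pgofs in the inode's single-extent cache. On a hit, map
 * bh_result to the cached block range and return 1; otherwise return 0.
 */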
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return 0;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

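/*
 * Record the new block address in the node page, then grow, split or drop
 * the cached extent so it stays consistent with the on-disk addresses.
 */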
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	f2fs_bug_on(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext.ext_lock);
	if (need_update)
		sync_inode_page(dn);
	return;
}

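/*
 * Find the data page at @index, reading it from disk if necessary. The
 * returned page is referenced but not locked; a hole yields -ENOENT.
 */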
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* By fallocate(), there is no cached page, but its blkaddr is NEW_ADDR */
	if (unlikely(dn.data_blkaddr == NEW_ADDR))
		return ERR_PTR(-EINVAL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (err)
		return ERR_PTR(err);

	if (sync) {
		wait_on_page_locked(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * Because the callers (functions in dir.c and GC) should be able to know
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release a rwsem by calling f2fs_lock_op()
 * and f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err)
		return ERR_PTR(err);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		err = -ENOMEM;
		goto put_err;
	}

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
								READ_SYNC);
		if (err)
			goto put_err;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto put_err;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;

put_err:
	f2fs_put_dnode(&dn);
	return ERR_PTR(err);
}

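/*
 * Allocate an on-disk block for direct I/O: charge the valid block count,
 * pick a new address from the warm data log, and record it in the dnode.
 */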
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct f2fs_summary sum;
	block_t new_blkaddr;
	struct node_info ni;
	int type;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;

	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	type = CURSEG_WARM_DATA;

	allocate_data_block(sbi, NULL, NULL_ADDR, &new_blkaddr, &sum, type);

	/* direct IO doesn't use the extent cache, to maximize performance */
	set_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);
	update_extent_cache(new_blkaddr, dn);
	clear_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);

	dn->data_blkaddr = new_blkaddr;
	return 0;
}

/*
 * get_data_block() now supports readahead/bmap/rw direct_IO with a mapped bh.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	bool allocated = false;

	/* Get the page offset from the block offset(iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result))
		goto out;

	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR)
		goto put_out;

	if (dn.data_blkaddr != NULL_ADDR) {
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else if (create) {
		err = __allocate_data_block(&dn);
		if (err)
			goto put_out;
		allocated = true;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else {
		goto put_out;
	}

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	bh_result->b_size = (((size_t)1) << blkbits);
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}
		if (dn.data_blkaddr == NEW_ADDR)
			goto put_out;

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	}

	if (maxblocks > (bh_result->b_size >> blkbits)) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			blkaddr = dn.data_blkaddr;
		}
		/* Give more consecutive addresses for readahead */
		if (blkaddr == (bh_result->b_blocknr + ofs)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			bh_result->b_size += (((size_t)1) << blkbits);
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(sbi);
out:
	trace_f2fs_get_data_block(inode, iblock, bh_result, err);
	return err;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret;

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	else
		ret = mpage_readpage(page, get_data_block);

	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return mpage_readpages(mapping, pages, nr_pages, get_data_block);
}

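/*
 * Write one data page to its on-disk location: reuse the old address by
 * in-place update when SSR demands it, otherwise allocate a new block and
 * refresh the extent cache.
 */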
int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
{
	struct inode *inode = page->mapping->host;
	block_t old_blkaddr, new_blkaddr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blkaddr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR, it is better to do in-place
	 * writes for updated data.
	 */
	if (unlikely(old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(page, old_blkaddr, fio);
	} else {
		write_data_page(page, &dn, &new_blkaddr, fio);
		update_extent_cache(new_blkaddr, &dn);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

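/*
 * Write back a single data page. Pages entirely beyond i_size are skipped,
 * dentry pages are written under checkpoint control, and regular data is
 * written under f2fs_lock_op(), as inline data when the file qualifies.
 */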
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out of range of the file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(sbi->por_doing))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		err = do_write_data_page(page, &fio);
		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode) || f2fs_may_inline(inode))
		err = f2fs_write_inline_data(inode, page, offset);
	else
		err = do_write_data_page(page, &fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_dents(inode);
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	bool locked = false;
	int ret;
	long diff;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_dents(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_dents(inode);
	return 0;
}

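/*
 * Prepare a page for a buffered write: convert inline data when the write
 * no longer fits, reserve the data block, and bring the old contents
 * uptodate when only part of the page will be overwritten.
 */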
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);
repeat:
	err = f2fs_convert_inline_data(inode, pos + len);
	if (err)
		return err;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	/* to avoid latency during memory pressure */
	unlock_page(page);

	*pagep = page;

	if (f2fs_has_inline_data(inode) && (pos + len) <= MAX_INLINE_DATA)
		goto inline_data;

	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	f2fs_unlock_op(sbi);

	if (err) {
		f2fs_put_page(page, 0);
		return err;
	}
inline_data:
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	f2fs_wait_on_page_writeback(page, DATA);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		if (f2fs_has_inline_data(inode)) {
			err = f2fs_read_inline_data(inode, page);
			if (err) {
				page_cache_release(page);
				return err;
			}
		} else {
			err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
							READ_SYNC);
			if (err)
				return err;
		}

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			return -EIO;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	SetPageUptodate(page);
	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

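/*
 * Direct writes must be block-aligned: return -EINVAL when the offset or
 * any iovec length is not a multiple of the block size, so that the caller
 * falls back to buffered I/O. Reads are always allowed through.
 */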
static int check_direct_IO(struct inode *inode, int rw,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
	int i;

	if (rw == READ)
		return 0;

	if (offset & blocksize_mask)
		return -EINVAL;

	for (i = 0; i < nr_segs; i++)
		if (iov[i].iov_len & blocksize_mask)
			return -EINVAL;
	return 0;
}

static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	/* Let buffered I/O handle the inline data case. */
	if (f2fs_has_inline_data(inode))
		return 0;

	if (check_direct_IO(inode, rw, iov, offset, nr_segs))
		return 0;

	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
							get_data_block);
}

static void f2fs_invalidate_data_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	if (PageDirty(page))
		inode_dec_dirty_dents(inode);
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);
	mark_inode_dirty(inode);

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	return generic_block_bmap(mapping, block, get_data_block);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};