/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

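/*
 * Read-completion handler.  The loop below walks the bio_vec array
 * backwards from the last filled entry: each page is marked uptodate
 * (or flagged with an error) and unlocked, which is what wakes readers
 * blocked in lock_page() or wait_on_page_locked().
 */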
static void f2fs_read_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (unlikely(!uptodate)) {
			ClearPageUptodate(page);
			SetPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
}

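/*
 * Write-completion handler.  An I/O error here is treated as fatal for
 * consistency: the page's mapping is tagged with AS_EIO, CP_ERROR_FLAG is
 * latched into the checkpoint, and the superblock is flipped read-only.
 * Once the F2FS_WRITEBACK count drops to zero, any checkpoint sleeping on
 * sbi->cp_wait is woken up.
 */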
static void f2fs_write_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct f2fs_sb_info *sbi = bio->bi_private;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (unlikely(!uptodate)) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
			set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
			sbi->sb->s_flags |= MS_RDONLY;
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	} while (bvec >= bio->bi_io_vec);

	if (sbi->wait_io) {
		complete(sbi->wait_io);
		sbi->wait_io = NULL;
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = sbi;

	return bio;
}

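/*
 * Submit the bio currently being merged in @io, if any.  META_FLUSH bios
 * come only from the checkpoint procedure and are waited on synchronously
 * through sbi->wait_io, which f2fs_write_end_io() completes.
 */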
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;
	int rw;

	if (!io->bio)
		return;

	rw = fio->rw;

	if (is_read_io(rw)) {
		trace_f2fs_submit_read_bio(io->sbi->sb, rw,
						fio->type, io->bio);
		submit_bio(rw, io->bio);
	} else {
		trace_f2fs_submit_write_bio(io->sbi->sb, rw,
						fio->type, io->bio);
		/*
		 * META_FLUSH is issued only by the checkpoint procedure, and
		 * we should wait for this metadata bio for FS consistency.
		 */
		if (fio->type == META_FLUSH) {
			DECLARE_COMPLETION_ONSTACK(wait);
			io->sbi->wait_io = &wait;
			submit_bio(rw, io->bio);
			wait_for_completion(&wait);
		} else {
			submit_bio(rw, io->bio);
		}
	}

	io->bio = NULL;
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	mutex_lock(&io->io_mutex);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	mutex_unlock(&io->io_mutex);
}

/*
 * Fill the locked page with data located at the given block address.
 * The page is unlocked by the end_io handler when the read completes.
 */
int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int rw)
{
	struct bio *bio;

	trace_f2fs_submit_page_bio(page, blk_addr, rw);

	/* Allocate a new bio */
	bio = __bio_alloc(sbi, blk_addr, 1, is_read_io(rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(rw, bio);
	return 0;
}

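/*
 * Merged submission: pages destined for consecutive block addresses with
 * identical rw flags are accumulated into one large bio.  An illustrative
 * (hypothetical) sequence for three dirty pages p0..p2 mapped to blocks
 * 100..102:
 *
 *	f2fs_submit_page_mbio(sbi, p0, 100, &fio);	// opens a new bio
 *	f2fs_submit_page_mbio(sbi, p1, 101, &fio);	// merged: 101 == 100 + 1
 *	f2fs_submit_page_mbio(sbi, p2, 102, &fio);	// merged again
 *	f2fs_submit_merged_bio(sbi, DATA, WRITE);	// one bio reaches disk
 *
 * A gap in block addresses or a change of rw flags flushes the pending bio
 * first, as the __submit_merged_bio() call below shows.
 */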
void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
			block_t blk_addr, struct f2fs_io_info *fio)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, blk_addr);

	mutex_lock(&io->io_mutex);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));

		io->bio = __bio_alloc(sbi, blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = blk_addr;

	mutex_unlock(&io->io_mutex);
	trace_f2fs_submit_page_mbio(page, fio->rw, fio->type, blk_addr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *   update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

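/*
 * Wrapper around reserve_new_block(): look up (or allocate) the dnode that
 * covers @index and reserve a new block there if none is mapped.  If the
 * caller passed in an inode page via dn->inode_page, the dnode is kept for
 * the caller; otherwise it is put before returning.
 */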
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	/* if inode_page exists, index should be zero */
	f2fs_bug_on(!need_put && index);

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return 0;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

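/*
 * The inode caches a single extent [fofs, fofs + len) -> blk_addr.  A
 * worked example of the cases below, starting from a cached extent with
 * fofs = 10, len = 5, blk_addr = 100 (file pages 10..14 at blocks 100..104):
 *
 *	write page 9  -> block 99	front merge:  fofs = 9,
 *					blk_addr = 99, len = 6
 *	write page 15 -> block 105	back merge:   len = 6
 *	write page 12 -> block 200	split, keep the larger half:
 *					fofs = 13, blk_addr = 103, len = 2
 *
 * If the surviving extent drops below F2FS_MIN_EXTENT_LEN, the cache is
 * emptied and FI_NO_EXTENT disables it for this inode.
 */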
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	f2fs_bug_on(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext.ext_lock);
	if (need_update)
		sync_inode_page(dn);
	return;
}

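/*
 * Find a data page without creating a mapping.  If the page is not cached
 * and uptodate, the block address is resolved through the dnode and a read
 * is issued; with sync == false the read goes out as readahead (READA) and
 * the function returns without waiting, so the page may not be uptodate yet.
 */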
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* After fallocate(), there is no cached page, but its blkaddr is NEW_ADDR */
	if (unlikely(dn.data_blkaddr == NEW_ADDR))
		return ERR_PTR(-EINVAL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (err)
		return ERR_PTR(err);

	if (sync) {
		wait_on_page_locked(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * The callers, the functions in dir.c and GC, need to be able to tell
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr remains NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release a rwsem by calling f2fs_lock_op()
 * and f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err)
		return ERR_PTR(err);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		err = -ENOMEM;
		goto put_err;
	}

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
								READ_SYNC);
		if (err)
			goto put_err;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto put_err;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;

put_err:
	f2fs_put_dnode(&dn);
	return ERR_PTR(err);
}

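/*
 * Block allocation for the direct I/O path.  Unlike reserve_new_block(),
 * which leaves NEW_ADDR behind, this immediately picks a physical block
 * from CURSEG_WARM_DATA so that blockdev_direct_IO() gets a real address.
 * FI_NO_EXTENT is toggled around update_extent_cache() so that only the
 * node page is updated, since direct I/O bypasses the extent cache.
 */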
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct f2fs_summary sum;
	block_t new_blkaddr;
	struct node_info ni;
	int type;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;

	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	type = CURSEG_WARM_DATA;

	allocate_data_block(sbi, NULL, NULL_ADDR, &new_blkaddr, &sum, type);

	/* direct IO doesn't use the extent cache, in order to maximize performance */
	set_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);
	update_extent_cache(new_blkaddr, dn);
	clear_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);

	dn->data_blkaddr = new_blkaddr;
	return 0;
}

/*
 * get_data_block() now supports readahead/bmap/rw direct_IO with a mapped bh.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	bool allocated = false;

	/* Get the page offset from the block offset(iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result))
		goto out;

	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR)
		goto put_out;

	if (dn.data_blkaddr != NULL_ADDR) {
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else if (create) {
		err = __allocate_data_block(&dn);
		if (err)
			goto put_out;
		allocated = true;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else {
		goto put_out;
	}

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	bh_result->b_size = (((size_t)1) << blkbits);
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}
		if (dn.data_blkaddr == NEW_ADDR)
			goto put_out;

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	}

	if (maxblocks > (bh_result->b_size >> blkbits)) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			blkaddr = dn.data_blkaddr;
		}
		/* Give more consecutive addresses for the readahead */
		if (blkaddr == (bh_result->b_blocknr + ofs)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			bh_result->b_size += (((size_t)1) << blkbits);
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(sbi);
out:
	trace_f2fs_get_data_block(inode, iblock, bh_result, err);
	return err;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	else
		ret = mpage_readpage(page, get_data_block);

	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return mpage_readpages(mapping, pages, nr_pages, get_data_block);
}

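/*
 * Write back one dirty data page.  Two policies apply:
 *  - in-place update: when need_inplace_update() says the allocator is
 *    under SSR pressure, rewrite the block at its old address and avoid
 *    consuming new segment space;
 *  - out-of-place update: the log-structured default, writing the data to
 *    a newly allocated block and refreshing the extent cache.
 */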
int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
{
	struct inode *inode = page->mapping->host;
	block_t old_blkaddr, new_blkaddr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blkaddr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR,
	 * it is better to do an in-place write for the updated data.
	 */
	if (unlikely(old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(page, old_blkaddr, fio);
	} else {
		write_data_page(page, &dn, &new_blkaddr, fio);
		update_extent_cache(new_blkaddr, &dn);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out of range of the file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(sbi->por_doing))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		err = do_write_data_page(page, &fio);
		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode) || f2fs_may_inline(inode))
		err = f2fs_write_inline_data(inode, page, offset);
	else
		err = do_write_data_page(page, &fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_dents(inode);
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	bool locked = false;
	int ret;
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_dents(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_dents(inode);
	return 0;
}

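/*
 * ->write_begin: prepare *pagep for a write of @len bytes at @pos.  The
 * sequence below is: convert inline data once the write outgrows it,
 * reserve the block mapping under f2fs_lock_op(), then bring the page
 * uptodate (zeroing past i_size, or reading inline/on-disk data for a
 * partial overwrite) before returning it locked.
 */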
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);
repeat:
	err = f2fs_convert_inline_data(inode, pos + len);
	if (err)
		return err;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	/* to avoid latency during memory pressure */
	unlock_page(page);

	*pagep = page;

	if (f2fs_has_inline_data(inode) && (pos + len) <= MAX_INLINE_DATA)
		goto inline_data;

	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	f2fs_unlock_op(sbi);

	if (err) {
		f2fs_put_page(page, 0);
		return err;
	}
inline_data:
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	f2fs_wait_on_page_writeback(page, DATA);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		if (f2fs_has_inline_data(inode)) {
			err = f2fs_read_inline_data(inode, page);
			if (err) {
				page_cache_release(page);
				return err;
			}
		} else {
			err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
							READ_SYNC);
			if (err)
				return err;
		}

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			return -EIO;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	SetPageUptodate(page);
	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

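/*
 * Direct I/O needs block-aligned offsets and iovec lengths for writes.
 * For example, with a 4KB block size (blocksize_mask == 0xfff), 8192 bytes
 * at offset 4096 passes, while 6144 bytes at offset 4096 fails with
 * -EINVAL since 6144 & 0xfff != 0.  Reads are accepted unconditionally.
 */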
static int check_direct_IO(struct inode *inode, int rw,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
	int i;

	if (rw == READ)
		return 0;

	if (offset & blocksize_mask)
		return -EINVAL;

	for (i = 0; i < nr_segs; i++)
		if (iov[i].iov_len & blocksize_mask)
			return -EINVAL;
	return 0;
}

static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	/* Let buffered I/O handle the inline data case. */
	if (f2fs_has_inline_data(inode))
		return 0;

	if (check_direct_IO(inode, rw, iov, offset, nr_segs))
		return 0;

	/* clear fsync mark to recover these blocks */
	fsync_mark_clear(F2FS_SB(inode->i_sb), inode->i_ino);

	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
							get_data_block);
}

static void f2fs_invalidate_data_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;

	if (PageDirty(page))
		inode_dec_dirty_dents(inode);
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);
	mark_inode_dirty(inode);

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	return generic_block_bmap(mapping, block, get_data_block);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};