/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

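/*
 * Read completion: walk the bio's pages from the last bio_vec back to
 * the first, mark each page uptodate (or with an error on failure),
 * and unlock it so that waiting readers can proceed.
 */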
static void f2fs_read_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (unlikely(!uptodate)) {
			ClearPageUptodate(page);
			SetPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
}

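/*
 * Write completion: on failure, mark the page and its mapping with an
 * I/O error, flag the checkpoint as erroneous, and force the
 * filesystem read-only.  Completes the checkpoint waiter (wait_io) if
 * one is registered, and wakes up cp_wait once no writeback pages
 * remain.
 */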
static void f2fs_write_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct f2fs_sb_info *sbi = bio->bi_private;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (unlikely(!uptodate)) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
			set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
			sbi->sb->s_flags |= MS_RDONLY;
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	} while (bvec >= bio->bi_io_vec);

	if (sbi->wait_io) {
		complete(sbi->wait_io);
		sbi->wait_io = NULL;
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = sbi;

	return bio;
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;
	int rw;

	if (!io->bio)
		return;

	rw = fio->rw;

	if (is_read_io(rw)) {
		trace_f2fs_submit_read_bio(io->sbi->sb, rw,
						fio->type, io->bio);
		submit_bio(rw, io->bio);
	} else {
		trace_f2fs_submit_write_bio(io->sbi->sb, rw,
						fio->type, io->bio);
		/*
		 * META_FLUSH is issued only by the checkpoint procedure, and
		 * we should wait on this metadata bio for FS consistency.
		 */
		if (fio->type == META_FLUSH) {
			DECLARE_COMPLETION_ONSTACK(wait);
			io->sbi->wait_io = &wait;
			submit_bio(rw, io->bio);
			wait_for_completion(&wait);
		} else {
			submit_bio(rw, io->bio);
		}
	}

	io->bio = NULL;
}

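/*
 * Flush the bio currently being merged for the given page type.  In
 * the checkpoint path the request is upgraded to META_FLUSH so that
 * __submit_merged_bio() waits on the metadata write synchronously.
 */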
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	mutex_lock(&io->io_mutex);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	mutex_unlock(&io->io_mutex);
}

/*
 * Fill the locked page with data located at the given block address.
 * Return the unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int rw)
{
	struct bio *bio;

	trace_f2fs_submit_page_bio(page, blk_addr, rw);

	/* Allocate a new bio */
	bio = __bio_alloc(sbi, blk_addr, 1, is_read_io(rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(rw, bio);
	return 0;
}

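/*
 * Merged submission path: append the page to the bio being built for
 * its page type, first flushing the pending bio whenever the new block
 * is not contiguous with the last one or the rw flags differ, so that
 * every submitted bio covers a consecutive range of blocks.
 */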
void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
			block_t blk_addr, struct f2fs_io_info *fio)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, blk_addr);

	mutex_lock(&io->io_mutex);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));

		io->bio = __bio_alloc(sbi, blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = blk_addr;

	mutex_unlock(&io->io_mutex);
	trace_f2fs_submit_page_mbio(page, fio->rw, fio->type, blk_addr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

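/*
 * Reserve one block at @index for the caller.  If dn->inode_page is
 * already held, @index must be zero and the dnode is kept for the
 * caller; otherwise the dnode is put here before returning.
 */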
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	/* if inode_page exists, index should be zero */
	f2fs_bug_on(!need_put && index);

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

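/*
 * Look up the per-inode single-extent cache.  On a hit, bh_result is
 * mapped to the matching block and sized to cover the rest of the
 * extent.  Illustrative example: with a cached extent {fofs = 8,
 * blk_addr = 100, len = 4}, looking up pgofs 9 maps bh_result to
 * block 101 with b_size spanning the 3 remaining blocks.
 */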
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return 0;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

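/*
 * Keep the single-extent cache in sync after a block address change:
 * extend it by a front or back merge when the new block is adjacent,
 * split it when an inner block is remapped, and drop it altogether
 * (setting FI_NO_EXTENT) once it becomes too fragmented to be useful.
 */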
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	f2fs_bug_on(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
			fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext.ext_lock);
	if (need_update)
		sync_inode_page(dn);
	return;
}

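/*
 * Find a data page in the page cache; when the cached copy is missing
 * or not uptodate, issue a read (READ_SYNC or READA depending on
 * @sync).  The returned page is not locked.
 */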
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* After fallocate(), there is no cached page, but its blkaddr is NEW_ADDR */
	if (unlikely(dn.data_blkaddr == NEW_ADDR))
		return ERR_PTR(-EINVAL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (err)
		return ERR_PTR(err);

	if (sync) {
		wait_on_page_locked(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * Because the callers (functions in dir.c and GC) need to know whether
 * this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page may have been allocated but not written, because
	 * its new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr remains NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release a rwsem by calling
 * f2fs_lock_op() and f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err)
		return ERR_PTR(err);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		err = -ENOMEM;
		goto put_err;
	}

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
								READ_SYNC);
		if (err)
			goto put_err;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto put_err;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;

put_err:
	f2fs_put_dnode(&dn);
	return ERR_PTR(err);
}

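/*
 * Allocate an on-disk block for the dnode's current offset (used by
 * the direct IO path): charge one valid block, pick a warm data
 * segment, and record the new address in both the node page and the
 * dnode, bypassing the extent cache via FI_NO_EXTENT.
 */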
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct f2fs_summary sum;
	block_t new_blkaddr;
	struct node_info ni;
	int type;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;

	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	type = CURSEG_WARM_DATA;

	allocate_data_block(sbi, NULL, NULL_ADDR, &new_blkaddr, &sum, type);

	/* direct IO doesn't use the extent cache, to maximize performance */
	set_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);
	update_extent_cache(new_blkaddr, dn);
	clear_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);

	dn->data_blkaddr = new_blkaddr;
	return 0;
}

/*
 * get_data_block() now supports readahead/bmap/rw direct_IO with a
 * mapped bh.  If the original data blocks are allocated, they are
 * given to the blockdev.  Otherwise,
 *     a. preallocate the requested block addresses
 *     b. do not use the extent cache for better performance
 *     c. give the block addresses to the blockdev
 */
static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	bool allocated = false;

	/* Get the page offset from the block offset(iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result))
		goto out;

	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR)
		goto put_out;

	if (dn.data_blkaddr != NULL_ADDR) {
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else if (create) {
		err = __allocate_data_block(&dn);
		if (err)
			goto put_out;
		allocated = true;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else {
		goto put_out;
	}

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	bh_result->b_size = (((size_t)1) << blkbits);
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}
		if (dn.data_blkaddr == NEW_ADDR)
			goto put_out;

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	}

	if (maxblocks > (bh_result->b_size >> blkbits)) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			blkaddr = dn.data_blkaddr;
		}
		/* Give more consecutive addresses for readahead */
		if (blkaddr == (bh_result->b_blocknr + ofs)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			bh_result->b_size += (((size_t)1) << blkbits);
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(sbi);
out:
	trace_f2fs_get_data_block(inode, iblock, bh_result, err);
	return err;
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo, start, len, get_data_block);
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	else
		ret = mpage_readpage(page, get_data_block);

	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return mpage_readpages(mapping, pages, nr_pages, get_data_block);
}

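/*
 * Write one data page to its on-disk location.  An existing block is
 * rewritten in place when the segment manager prefers in-place update
 * (SSR); otherwise a new block is allocated and the extent cache is
 * updated to match.
 */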
int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
{
	struct inode *inode = page->mapping->host;
	block_t old_blkaddr, new_blkaddr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blkaddr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR, it is better to do
	 * in-place writes for the updated data.
	 */
	if (unlikely(old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(page, old_blkaddr, fio);
	} else {
		write_data_page(page, &dn, &new_blkaddr, fio);
		update_extent_cache(new_blkaddr, &dn);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

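/*
 * ->writepage: zero the tail of a partially valid last page, skip
 * pages wholly beyond i_size, and route dentry blocks through the
 * checkpoint-controlled path.  Regular data is written under
 * f2fs_lock_op(), and the page is redirtied (AOP_WRITEPAGE_ACTIVATE)
 * during recovery or when free sections run short under reclaim.
 */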
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out of range of the file size,
	 * this page does not need to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(sbi->por_doing))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		err = do_write_data_page(page, &fio);
		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode) || f2fs_may_inline(inode))
		err = f2fs_write_inline_data(inode, page, offset);
	else
		err = do_write_data_page(page, &fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_dents(inode);
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	bool locked = false;
	int ret;
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_dents(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_dents(inode);
	return 0;
}

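/*
 * ->write_begin: convert inline data once the write would outgrow it,
 * reserve the block being written, and bring the page uptodate by
 * zeroing or reading back the old contents when the write only covers
 * part of the page.
 */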
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);
repeat:
	err = f2fs_convert_inline_data(inode, pos + len);
	if (err)
		return err;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	/* to avoid latency during memory pressure */
	unlock_page(page);

	*pagep = page;

	if (f2fs_has_inline_data(inode) && (pos + len) <= MAX_INLINE_DATA)
		goto inline_data;

	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	f2fs_unlock_op(sbi);

	if (err) {
		f2fs_put_page(page, 0);
		return err;
	}
inline_data:
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	f2fs_wait_on_page_writeback(page, DATA);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		if (f2fs_has_inline_data(inode)) {
			err = f2fs_read_inline_data(inode, page);
			if (err) {
				page_cache_release(page);
				return err;
			}
		} else {
			err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
							READ_SYNC);
			if (err)
				return err;
		}

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			return -EIO;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	SetPageUptodate(page);
	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

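/*
 * Direct IO sanity check: reads are always allowed, while writes
 * require the file offset and every iovec length to be aligned to the
 * filesystem block size.
 */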
static int check_direct_IO(struct inode *inode, int rw,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
	int i;

	if (rw == READ)
		return 0;

	if (offset & blocksize_mask)
		return -EINVAL;

	for (i = 0; i < nr_segs; i++)
		if (iov[i].iov_len & blocksize_mask)
			return -EINVAL;
	return 0;
}

static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	/* Let buffer I/O handle the inline data case. */
	if (f2fs_has_inline_data(inode))
		return 0;

	if (check_direct_IO(inode, rw, iov, offset, nr_segs))
		return 0;

	/* clear fsync mark to recover these blocks */
	fsync_mark_clear(F2FS_SB(inode->i_sb), inode->i_ino);

	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
							get_data_block);
}

static void f2fs_invalidate_data_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	if (PageDirty(page))
		inode_dec_dirty_dents(inode);
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);
	mark_inode_dirty(inode);

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	return generic_block_bmap(mapping, block, get_data_block);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};