/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	wait_on_page_writeback(node_page);

	rn = (struct f2fs_node *)page_address(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

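/*
 * reserve_new_block() charges one block against the inode's block quota
 * and marks the slot in the node page as NEW_ADDR (allocated but not yet
 * written). It fails with -EPERM if allocation is forbidden for this
 * inode, or with -ENOSPC if no free blocks are left.
 */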
int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return -EPERM;
	if (!inc_valid_block_count(sbi, dn->inode, 1))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	sync_inode_page(dn);
	return 0;
}

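/*
 * Look up @pgofs in the inode's single-extent cache. On a hit, map
 * @bh_result to the cached block address and set b_size to cover the
 * remaining length of the extent, so callers can read several
 * consecutive blocks without walking the node page. Returns 1 on a
 * hit, 0 on a miss.
 */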
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	sbi->total_hit_ext++;
	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		sbi->read_hit_ext++;
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

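/*
 * Fold the new (file offset, block address) pair into the single-extent
 * cache: start a fresh one-block extent if the cache is empty, grow the
 * extent by a front or back merge when the new block is contiguous, and
 * otherwise split the extent, keeping the larger remaining fragment.
 */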
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;

	BUG_ON(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page)) + dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
		goto end_update;
	}
	write_unlock(&fi->ext.ext_lock);
	return;

end_update:
	write_unlock(&fi->ext.ext_lock);
	sync_inode_page(dn);
	return;
}

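/*
 * find_data_page() returns the cached page if it is already uptodate.
 * Otherwise it looks up the block address and issues a read: a blocking
 * READ_SYNC when @sync is set, or a READA hint that lets callers use
 * this as a readahead primitive without waiting for the I/O.
 */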
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* fallocate() may leave NEW_ADDR blocks that have no cached page */
	if (dn.data_blkaddr == NEW_ADDR)
		return ERR_PTR(-EINVAL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_readpage(sbi, page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (sync) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If it tries to access a hole, return an error: the callers (the dir.c
 * functions and GC) need to know whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	BUG_ON(dn.data_blkaddr == NEW_ADDR);
	BUG_ON(dn.data_blkaddr == NULL_ADDR);

	err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (page->mapping != mapping) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release a mutex by calling
 * mutex_lock_op() and mutex_unlock_op().
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *npage, pgoff_t index, bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, npage, npage, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		return ERR_PTR(err);

	if (dn.data_blkaddr == NULL_ADDR) {
		if (reserve_new_block(&dn)) {
			if (!npage)
				f2fs_put_dnode(&dn);
			return ERR_PTR(-ENOSPC);
		}
	}
	if (!npage)
		f2fs_put_dnode(&dn);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return ERR_PTR(err);
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return ERR_PTR(-EIO);
		}
		if (page->mapping != mapping) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		mark_inode_dirty_sync(inode);
	}
	return page;
}

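/*
 * Read completion handler: walk the bio's page vector in reverse,
 * prefetching the next entry, and mark each page uptodate or failed
 * before unlocking it. Any private cookie attached to the bio is freed
 * before the bio itself is put.
 */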
static void read_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	kfree(bio->bi_private);
	bio_put(bio);
}

/*
 * Fill the locked page with data located at the given block address.
 * The page is returned unlocked (by read_end_io) once the read completes.
 */
int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int type)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct bio *bio;

	trace_f2fs_readpage(page, blk_addr, type);

	down_read(&sbi->bio_sem);

	/* Allocate a new bio */
	bio = f2fs_bio_alloc(bdev, 1);

	/* Initialize the bio */
	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = read_end_io;

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		kfree(bio->bi_private);
		bio_put(bio);
		up_read(&sbi->bio_sem);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(type, bio);
	up_read(&sbi->bio_sem);
	return 0;
}

/*
 * This function should be used by the data read flow only, since it
 * never allocates blocks regardless of the "create" flag. This special
 * behavior exists to let the VFS readahead mechanism drive it safely.
 */
static int get_data_block_ro(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	pgoff_t pgofs;
	int err;

	/* Get the page offset from the block offset(iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result)) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
		return 0;
	}

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
	if (err) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, err);
		return (err == -ENOENT) ? 0 : err;
	}

	/* It does not support data allocation */
	BUG_ON(create);

	if (dn.data_blkaddr != NEW_ADDR && dn.data_blkaddr != NULL_ADDR) {
		int i;
		unsigned int end_offset;

		end_offset = IS_INODE(dn.node_page) ?
				ADDRS_PER_INODE :
				ADDRS_PER_BLOCK;

		clear_buffer_new(bh_result);

		/* Give more consecutive addresses for the read ahead */
		for (i = 0; i < end_offset - dn.ofs_in_node; i++)
			if (((datablock_addr(dn.node_page,
							dn.ofs_in_node + i))
				!= (dn.data_blkaddr + i)) || maxblocks == i)
				break;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
		bh_result->b_size = (i << blkbits);
	}
	f2fs_put_dnode(&dn);
	trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
	return 0;
}

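/*
 * ->readpage and ->readpages simply delegate to the generic mpage
 * helpers with the read-only block mapper above, so contiguous extents
 * get merged into large bios by the mpage code.
 */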
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	return mpage_readpage(page, get_data_block_ro);
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, get_data_block_ro);
}

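/*
 * Write one dirty data page to disk. Depending on the allocation policy,
 * the block is either rewritten in place (in-place update) or written to
 * a newly allocated block, in which case the node page and the extent
 * cache are updated with the new address.
 */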
int do_write_data_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	block_t old_blk_addr, new_blk_addr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blk_addr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR, it is better to do
	 * in-place writes for the updated data.
	 */
	if (old_blk_addr != NEW_ADDR && !is_cold_data(page) &&
				need_inplace_update(inode)) {
		rewrite_data_page(F2FS_SB(inode->i_sb), page,
						old_blk_addr);
	} else {
		write_data_page(inode, page, &dn,
				old_blk_addr, &new_blk_addr);
		update_extent_cache(new_blk_addr, &dn);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

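/*
 * ->writepage entry point. Pages fully beyond i_size are skipped, the
 * tail of a partial last page is zeroed, and dentry pages are written
 * without taking the per-sb operation lock since they are protected by
 * the checkpoint. While recovery (por_doing) is in flight, the page is
 * redirtied and AOP_WRITEPAGE_ACTIVATE is returned so writeback retries
 * later.
 */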
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset;
	bool need_balance_fs = false;
	int err = 0;

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out of range of the file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset) {
		if (S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		goto out;
	}

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (sbi->por_doing) {
		err = AOP_WRITEPAGE_ACTIVATE;
		goto redirty_out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
		err = do_write_data_page(page);
	} else {
		int ilock = mutex_lock_op(sbi);
		err = do_write_data_page(page);
		mutex_unlock_op(sbi, ilock);
		need_balance_fs = true;
	}
	if (err == -ENOENT)
		goto out;
	else if (err)
		goto redirty_out;

	if (wbc->for_reclaim)
		f2fs_submit_bio(sbi, DATA, true);

	clear_cold_data(page);
out:
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	return 0;

redirty_out:
	wbc->pages_skipped++;
	set_page_dirty(page);
	return err;
}

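/*
 * Small writeback requests are bumped up to this many pages in
 * f2fs_write_data_pages(), presumably to amortize the cost of one
 * writeback pass; the surplus is subtracted from nr_to_write afterwards.
 */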
#define MAX_DESIRED_PAGES_WP	4096

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

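/*
 * ->writepages: write out the dirty pages of one inode. Non-directory
 * inodes serialize on sbi->writepages, and all queued data bios are
 * flushed afterwards, synchronously for WB_SYNC_ALL callers.
 */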
static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	bool locked = false;
	int ret;
	long excess_nrtw = 0, desired_nrtw;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
		desired_nrtw = MAX_DESIRED_PAGES_WP;
		excess_nrtw = desired_nrtw - wbc->nr_to_write;
		wbc->nr_to_write = desired_nrtw;
	}

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);
	f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL));

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write -= excess_nrtw;
	return ret;
}

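/*
 * ->write_begin: reserve the on-disk block under the global operation
 * lock, then bring the page cache page into a writable state, either by
 * zeroing the relevant ranges (new or beyond-EOF blocks) or by reading
 * the old contents from disk for a partial overwrite.
 */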
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;
	int ilock;

	/* for nobh_write_end */
	*fsdata = NULL;

	f2fs_balance_fs(sbi);
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	ilock = mutex_lock_op(sbi);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		goto err;

	if (dn.data_blkaddr == NULL_ADDR)
		err = reserve_new_block(&dn);

	f2fs_put_dnode(&dn);
	if (err)
		goto err;

	mutex_unlock_op(sbi, ilock);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return err;
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return -EIO;
		}
		if (page->mapping != mapping) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

err:
	mutex_unlock_op(sbi, ilock);
	f2fs_put_page(page, 1);
	return err;
}

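/*
 * Direct writes are not supported yet and quietly fall back to the
 * buffered path by returning 0; direct reads go through the generic
 * blockdev helper using the read-only block mapper.
 */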
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
							get_data_block_ro);
}

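/*
 * When a dentry page is invalidated while still dirty, drop the dirty
 * dentry accounting so checkpoint bookkeeping stays consistent.
 */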
static void f2fs_invalidate_data_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (S_ISDIR(inode->i_mode) && PageDirty(page)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
	}
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

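/*
 * Mark a data page dirty; newly dirtied directory pages are also added
 * to the per-inode dirty dentry list so checkpoint can find them.
 */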
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}

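/*
 * FIBMAP support: map a file block to a disk sector via the generic
 * helper, using the read-only block mapper since bmap never allocates.
 */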
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, get_data_block_ro);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= nobh_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};