f2fs: sync to upstream
https://git.kernel.org/cgit/linux/kernel/git/jaegeuk/f2fs.git/commit/?h=linux-3.4&id=994642cfdbc08f1eda7ff48504f779cbcc9e3067
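In summary, this sync of fs/f2fs/data.c:

- hooks per-file encryption into the read/write end_io paths and
  threads struct f2fs_io_info (now carrying sbi, page and
  encrypted_page) through the bio submission helpers
- drops the private in-inode extent cache logic in favor of the shared
  f2fs_lookup_extent_cache()/f2fs_update_extent_cache() interface
- replaces the buffer_head based __get_data_block() with
  f2fs_map_blocks() and adds get_data_block_dio()/get_data_block_bmap()
- implements f2fs_fiemap() natively instead of via generic_block_fiemap()
- adds f2fs_mpage_readpages() (from fs/mpage.c) and
  f2fs_write_cache_pages() (from mm/page-writeback.c)
- tightens check_direct_IO() to validate memory alignment and reject
  duplicate iov_base entries
- keeps atomic-written pages Private in invalidatepage/releasepage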
Change-Id: I5beb230d4a5dc3ef3913f5bfc4c28b39bcb755b1
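---
Reviewer note (not part of the upstream change): the new mapping
interface replaces the get_block()-style buffer_head contract. Below is
a minimal sketch of how a caller drives it, mirroring __get_data_block()
in this patch; error handling is elided and F2FS_GET_BLOCK_DIO is just
one example flag:

	struct f2fs_map_blocks map;

	map.m_lblk = iblock;				/* first logical block */
	map.m_len = bh->b_size >> inode->i_blkbits;	/* max blocks wanted */

	if (!f2fs_map_blocks(inode, &map, create, F2FS_GET_BLOCK_DIO)) {
		/* m_pblk/m_len describe the mapped run; m_flags holds
		   F2FS_MAP_MAPPED / F2FS_MAP_NEW / F2FS_MAP_UNWRITTEN */
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;
	}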
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 7c507bf..c192e57 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -15,9 +15,12 @@
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
+#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
+#include <linux/uio.h>
+#include <linux/cleancache.h>
#include "f2fs.h"
#include "node.h"
@@ -30,6 +33,15 @@
struct bio_vec *bvec;
int i;
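+	/*
+	 * For an encrypted bio, decryption completes in the crypto
+	 * workqueue, which finishes the pages there; on error, just
+	 * release the crypto context and fail the pages below.
+	 */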
+ if (f2fs_bio_encrypted(bio)) {
+ if (err) {
+ f2fs_release_crypto_ctx(bio->bi_private);
+ } else {
+ f2fs_end_io_crypto_work(bio->bi_private, bio);
+ return;
+ }
+ }
+
__bio_for_each_segment(bvec, bio, i, 0) {
struct page *page = bvec->bv_page;
@@ -53,6 +65,8 @@
__bio_for_each_segment(bvec, bio, i, 0) {
struct page *page = bvec->bv_page;
+ f2fs_restore_and_release_control_page(&page);
+
if (unlikely(err)) {
set_page_dirty(page);
set_bit(AS_EIO, &page->mapping->flags);
@@ -77,13 +91,12 @@
{
struct bio *bio;
- /* No failure on bio allocation */
- bio = bio_alloc(GFP_NOIO, npages);
+ bio = f2fs_bio_alloc(npages);
bio->bi_bdev = sbi->sb->s_bdev;
bio->bi_sector = SECTOR_FROM_BLOCK(blk_addr);
bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
- bio->bi_private = sbi;
+ bio->bi_private = is_read ? NULL : sbi;
return bio;
}
@@ -130,20 +143,19 @@
* Fill the locked page with data located in the block address.
* Return unlocked page.
*/
-int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
- struct f2fs_io_info *fio)
+int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
struct bio *bio;
+ struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page;
trace_f2fs_submit_page_bio(page, fio);
- f2fs_trace_ios(page, fio, 0);
+ f2fs_trace_ios(fio, 0);
/* Allocate a new bio */
- bio = __bio_alloc(sbi, fio->blk_addr, 1, is_read_io(fio->rw));
+ bio = __bio_alloc(fio->sbi, fio->blk_addr, 1, is_read_io(fio->rw));
if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
bio_put(bio);
- f2fs_put_page(page, 1);
return -EFAULT;
}
@@ -151,12 +163,13 @@
return 0;
}
-void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
- struct f2fs_io_info *fio)
+void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
+ struct f2fs_sb_info *sbi = fio->sbi;
enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
struct f2fs_bio_info *io;
bool is_read = is_read_io(fio->rw);
+ struct page *bio_page;
io = is_read ? &sbi->read_io : &sbi->write_io[btype];
@@ -178,17 +191,19 @@
io->fio = *fio;
}
- if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
+ bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
+
+ if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) <
PAGE_CACHE_SIZE) {
__submit_merged_bio(io);
goto alloc_new;
}
io->last_block_in_bio = fio->blk_addr;
- f2fs_trace_ios(page, fio, 0);
+ f2fs_trace_ios(fio, 0);
up_write(&io->io_rwsem);
- trace_f2fs_submit_page_mbio(page, fio);
+ trace_f2fs_submit_page_mbio(fio->page, fio);
}
/*
@@ -197,7 +212,7 @@
* ->node_page
* update block addresses in the node page
*/
-static void __set_data_blkaddr(struct dnode_of_data *dn)
+void set_data_blkaddr(struct dnode_of_data *dn)
{
struct f2fs_node *rn;
__le32 *addr_array;
@@ -226,7 +241,7 @@
trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);
dn->data_blkaddr = NEW_ADDR;
- __set_data_blkaddr(dn);
+ set_data_blkaddr(dn);
mark_inode_dirty(dn->inode);
sync_inode_page(dn);
return 0;
@@ -248,219 +263,62 @@
return err;
}
-static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
- struct buffer_head *bh_result)
+int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
- struct f2fs_inode_info *fi = F2FS_I(inode);
- pgoff_t start_fofs, end_fofs;
- block_t start_blkaddr;
+ struct extent_info ei;
+ struct inode *inode = dn->inode;
- if (is_inode_flag_set(fi, FI_NO_EXTENT))
- return 0;
-
- read_lock(&fi->ext.ext_lock);
- if (fi->ext.len == 0) {
- read_unlock(&fi->ext.ext_lock);
+ if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+ dn->data_blkaddr = ei.blk + index - ei.fofs;
return 0;
}
- stat_inc_total_hit(inode->i_sb);
-
- start_fofs = fi->ext.fofs;
- end_fofs = fi->ext.fofs + fi->ext.len - 1;
- start_blkaddr = fi->ext.blk_addr;
-
- if (pgofs >= start_fofs && pgofs <= end_fofs) {
- unsigned int blkbits = inode->i_sb->s_blocksize_bits;
- size_t count;
-
- set_buffer_new(bh_result);
- map_bh(bh_result, inode->i_sb,
- start_blkaddr + pgofs - start_fofs);
- count = end_fofs - pgofs + 1;
- if (count < (UINT_MAX >> blkbits))
- bh_result->b_size = (count << blkbits);
- else
- bh_result->b_size = UINT_MAX;
-
- stat_inc_read_hit(inode->i_sb);
- read_unlock(&fi->ext.ext_lock);
- return 1;
- }
- read_unlock(&fi->ext.ext_lock);
- return 0;
+ return f2fs_reserve_block(dn, index);
}
-void update_extent_cache(struct dnode_of_data *dn)
-{
- struct f2fs_inode_info *fi = F2FS_I(dn->inode);
- pgoff_t fofs, start_fofs, end_fofs;
- block_t start_blkaddr, end_blkaddr;
- int need_update = true;
-
- f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);
-
- /* Update the page address in the parent node */
- __set_data_blkaddr(dn);
-
- if (is_inode_flag_set(fi, FI_NO_EXTENT))
- return;
-
- fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
- dn->ofs_in_node;
-
- write_lock(&fi->ext.ext_lock);
-
- start_fofs = fi->ext.fofs;
- end_fofs = fi->ext.fofs + fi->ext.len - 1;
- start_blkaddr = fi->ext.blk_addr;
- end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;
-
- /* Drop and initialize the matched extent */
- if (fi->ext.len == 1 && fofs == start_fofs)
- fi->ext.len = 0;
-
- /* Initial extent */
- if (fi->ext.len == 0) {
- if (dn->data_blkaddr != NULL_ADDR) {
- fi->ext.fofs = fofs;
- fi->ext.blk_addr = dn->data_blkaddr;
- fi->ext.len = 1;
- }
- goto end_update;
- }
-
- /* Front merge */
- if (fofs == start_fofs - 1 && dn->data_blkaddr == start_blkaddr - 1) {
- fi->ext.fofs--;
- fi->ext.blk_addr--;
- fi->ext.len++;
- goto end_update;
- }
-
- /* Back merge */
- if (fofs == end_fofs + 1 && dn->data_blkaddr == end_blkaddr + 1) {
- fi->ext.len++;
- goto end_update;
- }
-
- /* Split the existing extent */
- if (fi->ext.len > 1 &&
- fofs >= start_fofs && fofs <= end_fofs) {
- if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
- fi->ext.len = fofs - start_fofs;
- } else {
- fi->ext.fofs = fofs + 1;
- fi->ext.blk_addr = start_blkaddr +
- fofs - start_fofs + 1;
- fi->ext.len -= fofs - start_fofs + 1;
- }
- } else {
- need_update = false;
- }
-
- /* Finally, if the extent is very fragmented, let's drop the cache. */
- if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
- fi->ext.len = 0;
- set_inode_flag(fi, FI_NO_EXTENT);
- need_update = true;
- }
-end_update:
- write_unlock(&fi->ext.ext_lock);
- if (need_update)
- sync_inode_page(dn);
- return;
-}
-
-struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
+struct page *get_read_data_page(struct inode *inode, pgoff_t index,
+ int rw, bool for_write)
{
struct address_space *mapping = inode->i_mapping;
struct dnode_of_data dn;
struct page *page;
+ struct extent_info ei;
int err;
struct f2fs_io_info fio = {
+ .sbi = F2FS_I_SB(inode),
.type = DATA,
- .rw = sync ? READ_SYNC : READA,
+ .rw = rw,
+ .encrypted_page = NULL,
};
- page = find_get_page(mapping, index);
- if (page && PageUptodate(page))
- return page;
- f2fs_put_page(page, 0);
+ if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
+ return read_mapping_page(mapping, index, NULL);
+
+ page = f2fs_grab_cache_page(mapping, index, for_write);
+ if (!page)
+ return ERR_PTR(-ENOMEM);
+
+ if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+ dn.data_blkaddr = ei.blk + index - ei.fofs;
+ goto got_it;
+ }
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
if (err)
- return ERR_PTR(err);
+ goto put_err;
f2fs_put_dnode(&dn);
- if (dn.data_blkaddr == NULL_ADDR)
- return ERR_PTR(-ENOENT);
-
- /* By fallocate(), there is no cached page, but with NEW_ADDR */
- if (unlikely(dn.data_blkaddr == NEW_ADDR))
- return ERR_PTR(-EINVAL);
-
- page = grab_cache_page(mapping, index);
- if (!page)
- return ERR_PTR(-ENOMEM);
-
+ if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
+ err = -ENOENT;
+ goto put_err;
+ }
+got_it:
if (PageUptodate(page)) {
unlock_page(page);
return page;
}
- fio.blk_addr = dn.data_blkaddr;
- err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
- if (err)
- return ERR_PTR(err);
-
- if (sync) {
- wait_on_page_locked(page);
- if (unlikely(!PageUptodate(page))) {
- f2fs_put_page(page, 0);
- return ERR_PTR(-EIO);
- }
- }
- return page;
-}
-
-/*
- * If it tries to access a hole, return an error.
- * Because, the callers, functions in dir.c and GC, should be able to know
- * whether this page exists or not.
- */
-struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
-{
- struct address_space *mapping = inode->i_mapping;
- struct dnode_of_data dn;
- struct page *page;
- int err;
- struct f2fs_io_info fio = {
- .type = DATA,
- .rw = READ_SYNC,
- };
-repeat:
- page = grab_cache_page(mapping, index);
- if (!page)
- return ERR_PTR(-ENOMEM);
-
- set_new_dnode(&dn, inode, NULL, NULL, 0);
- err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
- if (err) {
- f2fs_put_page(page, 1);
- return ERR_PTR(err);
- }
- f2fs_put_dnode(&dn);
-
- if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
- f2fs_put_page(page, 1);
- return ERR_PTR(-ENOENT);
- }
-
- if (PageUptodate(page))
- return page;
-
/*
* A new dentry page is allocated but not able to be written, since its
* new inode page couldn't be allocated due to -ENOSPC.
@@ -470,14 +328,63 @@
if (dn.data_blkaddr == NEW_ADDR) {
zero_user_segment(page, 0, PAGE_CACHE_SIZE);
SetPageUptodate(page);
+ unlock_page(page);
return page;
}
fio.blk_addr = dn.data_blkaddr;
- err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
+ fio.page = page;
+ err = f2fs_submit_page_bio(&fio);
if (err)
- return ERR_PTR(err);
+ goto put_err;
+ return page;
+put_err:
+ f2fs_put_page(page, 1);
+ return ERR_PTR(err);
+}
+
+struct page *find_data_page(struct inode *inode, pgoff_t index)
+{
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page;
+
+ page = find_get_page(mapping, index);
+ if (page && PageUptodate(page))
+ return page;
+ f2fs_put_page(page, 0);
+
+ page = get_read_data_page(inode, index, READ_SYNC, false);
+ if (IS_ERR(page))
+ return page;
+
+ if (PageUptodate(page))
+ return page;
+
+ wait_on_page_locked(page);
+ if (unlikely(!PageUptodate(page))) {
+ f2fs_put_page(page, 0);
+ return ERR_PTR(-EIO);
+ }
+ return page;
+}
+
+/*
+ * If it tries to access a hole, return an error, because the callers
+ * (functions in dir.c and GC) should be able to know whether this page
+ * exists or not.
+ */
+struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
+ bool for_write)
+{
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page;
+repeat:
+ page = get_read_data_page(inode, index, READ_SYNC, for_write);
+ if (IS_ERR(page))
+ return page;
+
+ /* wait for read completion */
lock_page(page);
if (unlikely(!PageUptodate(page))) {
f2fs_put_page(page, 1);
@@ -496,7 +403,8 @@
*
* Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
* f2fs_unlock_op().
- * Note that, ipage is set only by make_empty_dir.
+ * Note that ipage is set only by make_empty_dir, and if any error occurs,
+ * ipage should be released by this function.
*/
struct page *get_new_data_page(struct inode *inode,
struct page *ipage, pgoff_t index, bool new_i_size)
@@ -505,57 +413,50 @@
struct page *page;
struct dnode_of_data dn;
int err;
+repeat:
+ page = f2fs_grab_cache_page(mapping, index, true);
+ if (!page) {
+ /*
+ * before exiting, we should make sure ipage will be released
+ * if any error occurs.
+ */
+ f2fs_put_page(ipage, 1);
+ return ERR_PTR(-ENOMEM);
+ }
set_new_dnode(&dn, inode, ipage, NULL, 0);
err = f2fs_reserve_block(&dn, index);
- if (err)
+ if (err) {
+ f2fs_put_page(page, 1);
return ERR_PTR(err);
-repeat:
- page = grab_cache_page(mapping, index);
- if (!page) {
- err = -ENOMEM;
- goto put_err;
}
+ if (!ipage)
+ f2fs_put_dnode(&dn);
if (PageUptodate(page))
- return page;
+ goto got_it;
if (dn.data_blkaddr == NEW_ADDR) {
zero_user_segment(page, 0, PAGE_CACHE_SIZE);
SetPageUptodate(page);
} else {
- struct f2fs_io_info fio = {
- .type = DATA,
- .rw = READ_SYNC,
- .blk_addr = dn.data_blkaddr,
- };
- err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
- if (err)
- goto put_err;
+ f2fs_put_page(page, 1);
- lock_page(page);
- if (unlikely(!PageUptodate(page))) {
- f2fs_put_page(page, 1);
- err = -EIO;
- goto put_err;
- }
- if (unlikely(page->mapping != mapping)) {
- f2fs_put_page(page, 1);
+ page = get_read_data_page(inode, index, READ_SYNC, true);
+ if (IS_ERR(page))
goto repeat;
- }
- }
- if (new_i_size &&
- i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
- i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
+ /* wait for read completion */
+ lock_page(page);
+ }
+got_it:
+ if (new_i_size && i_size_read(inode) <
+ ((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) {
+ i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT));
/* Only the directory inode sets new_i_size */
set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
}
return page;
-
-put_err:
- f2fs_put_dnode(&dn);
- return ERR_PTR(err);
}
static int __allocate_data_block(struct dnode_of_data *dn)
@@ -569,25 +470,34 @@
if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
return -EPERM;
+
+ dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
+ if (dn->data_blkaddr == NEW_ADDR)
+ goto alloc;
+
if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
return -ENOSPC;
+alloc:
get_node_info(sbi, dn->nid, &ni);
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
seg = CURSEG_DIRECT_IO;
- allocate_data_block(sbi, NULL, NULL_ADDR, &dn->data_blkaddr, &sum, seg);
-
- /* direct IO doesn't use extent cache to maximize the performance */
- __set_data_blkaddr(dn);
+ allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
+ &sum, seg);
+ set_data_blkaddr(dn);
/* update i_size */
fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
dn->ofs_in_node;
- if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
- i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));
+ if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
+ i_size_write(dn->inode,
+ ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT));
+
+ /* direct IO doesn't use extent cache to maximize the performance */
+ f2fs_drop_largest_extent(dn->inode, fofs);
return 0;
}
@@ -615,7 +525,13 @@
end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
while (dn.ofs_in_node < end_offset && len) {
- if (dn.data_blkaddr == NULL_ADDR) {
+ block_t blkaddr;
+
+ if (unlikely(f2fs_cp_error(sbi)))
+ goto sync_out;
+
+ blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
+ if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
if (__allocate_data_block(&dn))
goto sync_out;
allocated = true;
@@ -643,29 +559,38 @@
}
/*
- * get_data_block() now supported readahead/bmap/rw direct_IO with mapped bh.
+ * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
+ * f2fs_map_blocks structure.
* If original data blocks are allocated, then give them to blockdev.
* Otherwise,
* a. preallocate requested block addresses
* b. do not use extent cache for better performance
* c. give the block addresses to blockdev
*/
-static int __get_data_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create, bool fiemap)
+static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
+ int create, int flag)
{
- unsigned int blkbits = inode->i_sb->s_blocksize_bits;
- unsigned maxblocks = bh_result->b_size >> blkbits;
+ unsigned int maxblocks = map->m_len;
struct dnode_of_data dn;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
pgoff_t pgofs, end_offset;
int err = 0, ofs = 1;
+ struct extent_info ei;
bool allocated = false;
- /* Get the page offset from the block offset(iblock) */
- pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));
+ map->m_len = 0;
+ map->m_flags = 0;
- if (check_extent_cache(inode, pgofs, bh_result))
+ /* it only supports block size == page size */
+ pgofs = (pgoff_t)map->m_lblk;
+
+ if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
+ map->m_pblk = ei.blk + pgofs - ei.fofs;
+ map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
+ map->m_flags = F2FS_MAP_MAPPED;
goto out;
+ }
if (create)
f2fs_lock_op(F2FS_I_SB(inode));
@@ -678,25 +603,40 @@
err = 0;
goto unlock_out;
}
- if (dn.data_blkaddr == NEW_ADDR && !fiemap)
- goto put_out;
- if (dn.data_blkaddr != NULL_ADDR) {
- set_buffer_new(bh_result);
- map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
- } else if (create) {
- err = __allocate_data_block(&dn);
- if (err)
- goto put_out;
- allocated = true;
- set_buffer_new(bh_result);
- map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
- } else {
- goto put_out;
+ if (dn.data_blkaddr == NEW_ADDR || dn.data_blkaddr == NULL_ADDR) {
+ if (create) {
+ if (unlikely(f2fs_cp_error(sbi))) {
+ err = -EIO;
+ goto put_out;
+ }
+ err = __allocate_data_block(&dn);
+ if (err)
+ goto put_out;
+ allocated = true;
+ map->m_flags = F2FS_MAP_NEW;
+ } else {
+ if (flag != F2FS_GET_BLOCK_FIEMAP ||
+ dn.data_blkaddr != NEW_ADDR) {
+ if (flag == F2FS_GET_BLOCK_BMAP)
+ err = -ENOENT;
+ goto put_out;
+ }
+
+ /*
+ * preallocated unwritten block should be mapped
+ * for fiemap.
+ */
+ if (dn.data_blkaddr == NEW_ADDR)
+ map->m_flags = F2FS_MAP_UNWRITTEN;
+ }
}
+ map->m_flags |= F2FS_MAP_MAPPED;
+ map->m_pblk = dn.data_blkaddr;
+ map->m_len = 1;
+
end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
- bh_result->b_size = (((size_t)1) << blkbits);
dn.ofs_in_node++;
pgofs++;
@@ -714,27 +654,45 @@
err = 0;
goto unlock_out;
}
- if (dn.data_blkaddr == NEW_ADDR && !fiemap)
- goto put_out;
end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
}
- if (maxblocks > (bh_result->b_size >> blkbits)) {
+ if (maxblocks > map->m_len) {
block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
- if (blkaddr == NULL_ADDR && create) {
- err = __allocate_data_block(&dn);
- if (err)
- goto sync_out;
- allocated = true;
- blkaddr = dn.data_blkaddr;
+
+ if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
+ if (create) {
+ if (unlikely(f2fs_cp_error(sbi))) {
+ err = -EIO;
+ goto sync_out;
+ }
+ err = __allocate_data_block(&dn);
+ if (err)
+ goto sync_out;
+ allocated = true;
+ map->m_flags |= F2FS_MAP_NEW;
+ blkaddr = dn.data_blkaddr;
+ } else {
+ /*
+ * we only merge preallocated unwritten blocks
+ * for fiemap.
+ */
+ if (flag != F2FS_GET_BLOCK_FIEMAP ||
+ blkaddr != NEW_ADDR)
+ goto sync_out;
+ }
}
+
/* Give more consecutive addresses for the readahead */
- if (blkaddr == (bh_result->b_blocknr + ofs)) {
+ if ((map->m_pblk != NEW_ADDR &&
+ blkaddr == (map->m_pblk + ofs)) ||
+ (map->m_pblk == NEW_ADDR &&
+ blkaddr == NEW_ADDR)) {
ofs++;
dn.ofs_in_node++;
pgofs++;
- bh_result->b_size += (((size_t)1) << blkbits);
+ map->m_len++;
goto get_next;
}
}
@@ -747,27 +705,312 @@
if (create)
f2fs_unlock_op(F2FS_I_SB(inode));
out:
- trace_f2fs_get_data_block(inode, iblock, bh_result, err);
+ trace_f2fs_map_blocks(inode, map, err);
return err;
}
-static int get_data_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
+static int __get_data_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh, int create, int flag)
{
- return __get_data_block(inode, iblock, bh_result, create, false);
+ struct f2fs_map_blocks map;
+ int ret;
+
+ map.m_lblk = iblock;
+ map.m_len = bh->b_size >> inode->i_blkbits;
+
+ ret = f2fs_map_blocks(inode, &map, create, flag);
+ if (!ret) {
+ map_bh(bh, inode->i_sb, map.m_pblk);
+ bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
+ bh->b_size = map.m_len << inode->i_blkbits;
+ }
+ return ret;
}
-static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
+static int get_data_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create, int flag)
+{
+ return __get_data_block(inode, iblock, bh_result, create, flag);
+}
+
+static int get_data_block_dio(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
- return __get_data_block(inode, iblock, bh_result, create, true);
+ return __get_data_block(inode, iblock, bh_result, create,
+ F2FS_GET_BLOCK_DIO);
+}
+
+static int get_data_block_bmap(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create)
+{
+ return __get_data_block(inode, iblock, bh_result, create,
+ F2FS_GET_BLOCK_BMAP);
+}
+
+static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
+{
+ return (offset >> inode->i_blkbits);
+}
+
+static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
+{
+ return (blk << inode->i_blkbits);
}
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len)
{
- return generic_block_fiemap(inode, fieinfo,
- start, len, get_data_block_fiemap);
+ struct buffer_head map_bh;
+ sector_t start_blk, last_blk;
+ loff_t isize = i_size_read(inode);
+ u64 logical = 0, phys = 0, size = 0;
+ u32 flags = 0;
+ bool past_eof = false, whole_file = false;
+ int ret = 0;
+
+ ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
+ if (ret)
+ return ret;
+
+ mutex_lock(&inode->i_mutex);
+
+ if (len >= isize) {
+ whole_file = true;
+ len = isize;
+ }
+
+ if (logical_to_blk(inode, len) == 0)
+ len = blk_to_logical(inode, 1);
+
+ start_blk = logical_to_blk(inode, start);
+ last_blk = logical_to_blk(inode, start + len - 1);
+next:
+ memset(&map_bh, 0, sizeof(struct buffer_head));
+ map_bh.b_size = len;
+
+ ret = get_data_block(inode, start_blk, &map_bh, 0,
+ F2FS_GET_BLOCK_FIEMAP);
+ if (ret)
+ goto out;
+
+ /* HOLE */
+ if (!buffer_mapped(&map_bh)) {
+ start_blk++;
+
+ if (!past_eof && blk_to_logical(inode, start_blk) >= isize)
+ past_eof = 1;
+
+ if (past_eof && size) {
+ flags |= FIEMAP_EXTENT_LAST;
+ ret = fiemap_fill_next_extent(fieinfo, logical,
+ phys, size, flags);
+ } else if (size) {
+ ret = fiemap_fill_next_extent(fieinfo, logical,
+ phys, size, flags);
+ size = 0;
+ }
+
+ /* if we have holes up to/past EOF then we're done */
+ if (start_blk > last_blk || past_eof || ret)
+ goto out;
+ } else {
+ if (start_blk > last_blk && !whole_file) {
+ ret = fiemap_fill_next_extent(fieinfo, logical,
+ phys, size, flags);
+ goto out;
+ }
+
+ /*
+ * if size != 0 then we know we already have an extent
+ * to add, so add it.
+ */
+ if (size) {
+ ret = fiemap_fill_next_extent(fieinfo, logical,
+ phys, size, flags);
+ if (ret)
+ goto out;
+ }
+
+ logical = blk_to_logical(inode, start_blk);
+ phys = blk_to_logical(inode, map_bh.b_blocknr);
+ size = map_bh.b_size;
+ flags = 0;
+ if (buffer_unwritten(&map_bh))
+ flags = FIEMAP_EXTENT_UNWRITTEN;
+
+ start_blk += logical_to_blk(inode, size);
+
+ /*
+ * If we are past the EOF, then we need to make sure as
+ * soon as we find a hole that the last extent we found
+ * is marked with FIEMAP_EXTENT_LAST
+ */
+ if (!past_eof && logical + size >= isize)
+ past_eof = true;
+ }
+ cond_resched();
+ if (fatal_signal_pending(current))
+ ret = -EINTR;
+ else
+ goto next;
+out:
+ if (ret == 1)
+ ret = 0;
+
+ mutex_unlock(&inode->i_mutex);
+ return ret;
+}
+
+/*
+ * This function was originally taken from fs/mpage.c, and customized for f2fs.
+ * The major change is that f2fs uses block_size == page_size by default.
+ */
+static int f2fs_mpage_readpages(struct address_space *mapping,
+ struct list_head *pages, struct page *page,
+ unsigned nr_pages)
+{
+ struct bio *bio = NULL;
+ unsigned page_idx;
+ sector_t last_block_in_bio = 0;
+ struct inode *inode = mapping->host;
+ const unsigned blkbits = inode->i_blkbits;
+ const unsigned blocksize = 1 << blkbits;
+ sector_t block_in_file;
+ sector_t last_block;
+ sector_t last_block_in_file;
+ sector_t block_nr;
+ struct block_device *bdev = inode->i_sb->s_bdev;
+ struct f2fs_map_blocks map;
+
+ map.m_pblk = 0;
+ map.m_lblk = 0;
+ map.m_len = 0;
+ map.m_flags = 0;
+
+ for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
+
+ prefetchw(&page->flags);
+ if (pages) {
+ page = list_entry(pages->prev, struct page, lru);
+ list_del(&page->lru);
+ if (add_to_page_cache_lru(page, mapping,
+ page->index, GFP_KERNEL))
+ goto next_page;
+ }
+
+ block_in_file = (sector_t)page->index;
+ last_block = block_in_file + nr_pages;
+ last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
+ blkbits;
+ if (last_block > last_block_in_file)
+ last_block = last_block_in_file;
+
+ /*
+ * Map blocks using the previous result first.
+ */
+ if ((map.m_flags & F2FS_MAP_MAPPED) &&
+ block_in_file > map.m_lblk &&
+ block_in_file < (map.m_lblk + map.m_len))
+ goto got_it;
+
+ /*
+ * Then do more f2fs_map_blocks() calls until we are
+ * done with this page.
+ */
+ map.m_flags = 0;
+
+ if (block_in_file < last_block) {
+ map.m_lblk = block_in_file;
+ map.m_len = last_block - block_in_file;
+
+ if (f2fs_map_blocks(inode, &map, 0,
+ F2FS_GET_BLOCK_READ))
+ goto set_error_page;
+ }
+got_it:
+ if ((map.m_flags & F2FS_MAP_MAPPED)) {
+ block_nr = map.m_pblk + block_in_file - map.m_lblk;
+ SetPageMappedToDisk(page);
+
+ if (!PageUptodate(page) && !cleancache_get_page(page)) {
+ SetPageUptodate(page);
+ goto confused;
+ }
+ } else {
+ zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ SetPageUptodate(page);
+ unlock_page(page);
+ goto next_page;
+ }
+
+ /*
+ * This page will go to BIO. Do we need to send this
+ * BIO off first?
+ */
+ if (bio && (last_block_in_bio != block_nr - 1)) {
+submit_and_realloc:
+ submit_bio(READ, bio);
+ bio = NULL;
+ }
+ if (bio == NULL) {
+ struct f2fs_crypto_ctx *ctx = NULL;
+
+ if (f2fs_encrypted_inode(inode) &&
+ S_ISREG(inode->i_mode)) {
+ struct page *cpage;
+
+ ctx = f2fs_get_crypto_ctx(inode);
+ if (IS_ERR(ctx))
+ goto set_error_page;
+
+ /* wait for the page to be moved by the cleaner (GC) */
+ cpage = find_lock_page(
+ META_MAPPING(F2FS_I_SB(inode)),
+ block_nr);
+ if (cpage) {
+ f2fs_wait_on_page_writeback(cpage,
+ DATA);
+ f2fs_put_page(cpage, 1);
+ }
+ }
+
+ bio = bio_alloc(GFP_KERNEL,
+ min_t(int, nr_pages, BIO_MAX_PAGES));
+ if (!bio) {
+ if (ctx)
+ f2fs_release_crypto_ctx(ctx);
+ goto set_error_page;
+ }
+ bio->bi_bdev = bdev;
+ bio->bi_sector = SECTOR_FROM_BLOCK(block_nr);
+ bio->bi_end_io = f2fs_read_end_io;
+ bio->bi_private = ctx;
+ }
+
+ if (bio_add_page(bio, page, blocksize, 0) < blocksize)
+ goto submit_and_realloc;
+
+ last_block_in_bio = block_nr;
+ goto next_page;
+set_error_page:
+ SetPageError(page);
+ zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ unlock_page(page);
+ goto next_page;
+confused:
+ if (bio) {
+ submit_bio(READ, bio);
+ bio = NULL;
+ }
+ unlock_page(page);
+next_page:
+ if (pages)
+ page_cache_release(page);
+ }
+ BUG_ON(pages && !list_empty(pages));
+ if (bio)
+ submit_bio(READ, bio);
+ return 0;
}
static int f2fs_read_data_page(struct file *file, struct page *page)
@@ -781,8 +1024,7 @@
if (f2fs_has_inline_data(inode))
ret = f2fs_read_inline_data(inode, page);
if (ret == -EAGAIN)
- ret = mpage_readpage(page, get_data_block);
-
+ ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
return ret;
}
@@ -796,11 +1038,12 @@
if (f2fs_has_inline_data(inode))
return 0;
- return mpage_readpages(mapping, pages, nr_pages, get_data_block);
+ return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}
-int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
+int do_write_data_page(struct f2fs_io_info *fio)
{
+ struct page *page = fio->page;
struct inode *inode = page->mapping->host;
struct dnode_of_data dn;
int err = 0;
@@ -813,8 +1056,18 @@
fio->blk_addr = dn.data_blkaddr;
/* This page is already truncated */
- if (fio->blk_addr == NULL_ADDR)
+ if (fio->blk_addr == NULL_ADDR) {
+ ClearPageUptodate(page);
goto out_writepage;
+ }
+
+ if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
+ fio->encrypted_page = f2fs_encrypt(inode, fio->page);
+ if (IS_ERR(fio->encrypted_page)) {
+ err = PTR_ERR(fio->encrypted_page);
+ goto out_writepage;
+ }
+ }
set_page_writeback(page);
@@ -825,12 +1078,17 @@
if (unlikely(fio->blk_addr != NEW_ADDR &&
!is_cold_data(page) &&
need_inplace_update(inode))) {
- rewrite_data_page(page, fio);
+ rewrite_data_page(fio);
set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
+ trace_f2fs_do_write_data_page(page, IPU);
} else {
- write_data_page(page, &dn, fio);
- update_extent_cache(&dn);
+ write_data_page(&dn, fio);
+ set_data_blkaddr(&dn);
+ f2fs_update_extent_cache(&dn);
+ trace_f2fs_do_write_data_page(page, OPU);
set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
+ if (page->index == 0)
+ set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
}
out_writepage:
f2fs_put_dnode(&dn);
@@ -849,8 +1107,11 @@
bool need_balance_fs = false;
int err = 0;
struct f2fs_io_info fio = {
+ .sbi = sbi,
.type = DATA,
.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
+ .page = page,
+ .encrypted_page = NULL,
};
trace_f2fs_writepage(page, DATA);
@@ -880,7 +1141,7 @@
if (S_ISDIR(inode->i_mode)) {
if (unlikely(f2fs_cp_error(sbi)))
goto redirty_out;
- err = do_write_data_page(page, &fio);
+ err = do_write_data_page(&fio);
goto done;
}
@@ -900,7 +1161,7 @@
if (f2fs_has_inline_data(inode))
err = f2fs_write_inline_data(inode, page);
if (err == -EAGAIN)
- err = do_write_data_page(page, &fio);
+ err = do_write_data_page(&fio);
f2fs_unlock_op(sbi);
done:
if (err && err != -ENOENT)
@@ -909,6 +1170,8 @@
clear_cold_data(page);
out:
inode_dec_dirty_pages(inode);
+ if (err)
+ ClearPageUptodate(page);
unlock_page(page);
if (need_balance_fs)
f2fs_balance_fs(sbi);
@@ -930,6 +1193,137 @@
return ret;
}
+/*
+ * This function was copied from write_cache_pages in mm/page-writeback.c.
+ * The major change is that it writes cold data pages in a separate pass
+ * from warm/hot data pages.
+ */
+static int f2fs_write_cache_pages(struct address_space *mapping,
+ struct writeback_control *wbc, writepage_t writepage,
+ void *data)
+{
+ int ret = 0;
+ int done = 0;
+ struct pagevec pvec;
+ int nr_pages;
+ pgoff_t uninitialized_var(writeback_index);
+ pgoff_t index;
+ pgoff_t end; /* Inclusive */
+ pgoff_t done_index;
+ int cycled;
+ int range_whole = 0;
+ int tag;
+ int step = 0;
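+
+	/*
+	 * Two write passes: a page is skipped while
+	 * is_cold_data(page) == step, so cold pages and warm/hot pages
+	 * are submitted in separate passes.
+	 */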
+
+ pagevec_init(&pvec, 0);
+next:
+ if (wbc->range_cyclic) {
+ writeback_index = mapping->writeback_index; /* prev offset */
+ index = writeback_index;
+ if (index == 0)
+ cycled = 1;
+ else
+ cycled = 0;
+ end = -1;
+ } else {
+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+ range_whole = 1;
+ cycled = 1; /* ignore range_cyclic tests */
+ }
+ if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
+ tag = PAGECACHE_TAG_TOWRITE;
+ else
+ tag = PAGECACHE_TAG_DIRTY;
+retry:
+ if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
+ tag_pages_for_writeback(mapping, index, end);
+ done_index = index;
+ while (!done && (index <= end)) {
+ int i;
+
+ nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
+ if (nr_pages == 0)
+ break;
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pvec.pages[i];
+
+ if (page->index > end) {
+ done = 1;
+ break;
+ }
+
+ done_index = page->index;
+
+ lock_page(page);
+
+ if (unlikely(page->mapping != mapping)) {
+continue_unlock:
+ unlock_page(page);
+ continue;
+ }
+
+ if (!PageDirty(page)) {
+ /* someone wrote it for us */
+ goto continue_unlock;
+ }
+
+ if (step == is_cold_data(page))
+ goto continue_unlock;
+
+ if (PageWriteback(page)) {
+ if (wbc->sync_mode != WB_SYNC_NONE)
+ f2fs_wait_on_page_writeback(page, DATA);
+ else
+ goto continue_unlock;
+ }
+
+ BUG_ON(PageWriteback(page));
+ if (!clear_page_dirty_for_io(page))
+ goto continue_unlock;
+
+ ret = (*writepage)(page, wbc, data);
+ if (unlikely(ret)) {
+ if (ret == AOP_WRITEPAGE_ACTIVATE) {
+ unlock_page(page);
+ ret = 0;
+ } else {
+ done_index = page->index + 1;
+ done = 1;
+ break;
+ }
+ }
+
+ if (--wbc->nr_to_write <= 0 &&
+ wbc->sync_mode == WB_SYNC_NONE) {
+ done = 1;
+ break;
+ }
+ }
+ pagevec_release(&pvec);
+ cond_resched();
+ }
+
+ if (step < 1) {
+ step++;
+ goto next;
+ }
+
+ if (!cycled && !done) {
+ cycled = 1;
+ index = 0;
+ end = writeback_index - 1;
+ goto retry;
+ }
+ if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
+ mapping->writeback_index = done_index;
+
+ return ret;
+}
+
static int f2fs_write_data_pages(struct address_space *mapping,
struct writeback_control *wbc)
{
@@ -945,23 +1339,30 @@
if (!mapping->a_ops->writepage)
return 0;
+ /* skip writing if there is no dirty page in this inode */
+ if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
+ return 0;
+
if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
available_free_memory(sbi, DIRTY_DENTS))
goto skip_write;
+ /* during POR, we don't need to trigger writepage at all. */
+ if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+ goto skip_write;
+
diff = nr_pages_to_write(sbi, DATA, wbc);
if (!S_ISDIR(inode->i_mode)) {
mutex_lock(&sbi->writepages);
locked = true;
}
- ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
+ ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
+ f2fs_submit_merged_bio(sbi, DATA, WRITE);
if (locked)
mutex_unlock(&sbi->writepages);
- f2fs_submit_merged_bio(sbi, DATA, WRITE);
-
remove_dirty_dir_inode(inode);
wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
@@ -988,7 +1389,8 @@
{
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct page *page, *ipage;
+ struct page *page = NULL;
+ struct page *ipage;
pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
struct dnode_of_data dn;
int err = 0;
@@ -1038,42 +1440,47 @@
if (err)
goto put_fail;
}
- err = f2fs_reserve_block(&dn, index);
+
+ err = f2fs_get_block(&dn, index);
if (err)
goto put_fail;
put_next:
f2fs_put_dnode(&dn);
f2fs_unlock_op(sbi);
- if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
- return 0;
-
f2fs_wait_on_page_writeback(page, DATA);
+ if (len == PAGE_CACHE_SIZE)
+ goto out_update;
+ if (PageUptodate(page))
+ goto out_clear;
+
if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
unsigned start = pos & (PAGE_CACHE_SIZE - 1);
unsigned end = start + len;
/* Reading beyond i_size is simple: memset to zero */
zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
- goto out;
+ goto out_update;
}
if (dn.data_blkaddr == NEW_ADDR) {
zero_user_segment(page, 0, PAGE_CACHE_SIZE);
} else {
struct f2fs_io_info fio = {
+ .sbi = sbi,
.type = DATA,
.rw = READ_SYNC,
.blk_addr = dn.data_blkaddr,
+ .page = page,
+ .encrypted_page = NULL,
};
- err = f2fs_submit_page_bio(sbi, page, &fio);
+ err = f2fs_submit_page_bio(&fio);
if (err)
goto fail;
lock_page(page);
if (unlikely(!PageUptodate(page))) {
- f2fs_put_page(page, 1);
err = -EIO;
goto fail;
}
@@ -1081,9 +1488,17 @@
f2fs_put_page(page, 1);
goto repeat;
}
+
+ /* avoid symlink page */
+ if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
+ err = f2fs_decrypt_one(inode, page);
+ if (err)
+ goto fail;
+ }
}
-out:
+out_update:
SetPageUptodate(page);
+out_clear:
clear_cold_data(page);
return 0;
@@ -1091,8 +1506,8 @@
f2fs_put_dnode(&dn);
unlock_fail:
f2fs_unlock_op(sbi);
- f2fs_put_page(page, 1);
fail:
+ f2fs_put_page(page, 1);
f2fs_write_failed(mapping, pos + len);
return err;
}
@@ -1118,23 +1533,44 @@
return copied;
}
-static int check_direct_IO(struct inode *inode, int rw,
+static ssize_t check_direct_IO(struct inode *inode, int rw,
const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
- int i;
-
- if (rw == READ)
- return 0;
+ int seg, i;
+ size_t size;
+ unsigned long addr;
+ ssize_t retval = -EINVAL;
+ loff_t end = offset;
if (offset & blocksize_mask)
return -EINVAL;
- for (i = 0; i < nr_segs; i++)
- if (iov[i].iov_len & blocksize_mask)
- return -EINVAL;
+ /* Check the memory alignment. Blocks cannot straddle pages */
+ for (seg = 0; seg < nr_segs; seg++) {
+ addr = (unsigned long)iov[seg].iov_base;
+ size = iov[seg].iov_len;
+ end += size;
+ if ((addr & blocksize_mask) || (size & blocksize_mask))
+ goto out;
- return 0;
+ /* If this is a write we don't need to check anymore */
+ if (rw & WRITE)
+ continue;
+
+ /*
+ * Check to make sure we don't have duplicate iov_base's in this
+ * iovec; if so, return -EINVAL, otherwise we'll get csum errors
+ * when reading back.
+ */
+ for (i = seg + 1; i < nr_segs; i++) {
+ if (iov[seg].iov_base == iov[i].iov_base)
+ goto out;
+ }
+ }
+ retval = 0;
+out:
+ return retval;
}
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
@@ -1154,16 +1590,26 @@
return err;
}
- if (check_direct_IO(inode, rw, iov, offset, nr_segs))
+ if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
return 0;
+ err = check_direct_IO(inode, rw, iov, offset, nr_segs);
+ if (err)
+ return err;
+
trace_f2fs_direct_IO_enter(inode, offset, count, rw);
- if (rw & WRITE)
+ if (rw & WRITE) {
__allocate_data_blocks(inode, offset, count);
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
+ err = -EIO;
+ goto out;
+ }
+ }
err = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
- get_data_block);
+ get_data_block_dio);
+out:
if (err < 0 && (rw & WRITE))
f2fs_write_failed(mapping, offset + count);
@@ -1188,6 +1634,11 @@
else
inode_dec_dirty_pages(inode);
}
+
+ /* This is an atomic-written page; keep it Private */
+ if (IS_ATOMIC_WRITTEN_PAGE(page))
+ return;
+
ClearPagePrivate(page);
}
@@ -1197,6 +1648,10 @@
if (PageDirty(page))
return 0;
+ /* This is an atomic-written page; keep it Private */
+ if (IS_ATOMIC_WRITTEN_PAGE(page))
+ return 0;
+
ClearPagePrivate(page);
return 1;
}
@@ -1211,12 +1666,17 @@
SetPageUptodate(page);
if (f2fs_is_atomic_file(inode)) {
- register_inmem_page(inode, page);
- return 1;
+ if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
+ register_inmem_page(inode, page);
+ return 1;
+ }
+ /*
+ * This page has already been registered, so we just
+ * return here.
+ */
+ return 0;
}
- mark_inode_dirty(inode);
-
if (!PageDirty(page)) {
__set_page_dirty_nobuffers(page);
update_dirty_page(inode, page);
@@ -1235,7 +1695,7 @@
if (err)
return err;
}
- return generic_block_bmap(mapping, block, get_data_block);
+ return generic_block_bmap(mapping, block, get_data_block_bmap);
}
const struct address_space_operations f2fs_dblock_aops = {