/*
 * fs/logfs/file.c	- prepare_write, commit_write and friends
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/sched.h>
#include <linux/writeback.h>

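/*
 * logfs_write_begin - prepare a page for a buffered write
 *
 * Grabs and locks the page covering @pos and hands it back through @pagep.
 * No read from the medium is needed if the write covers the whole page or
 * the page is already uptodate.  If the page lies entirely beyond i_size,
 * the bytes around the write range are simply zeroed.  Otherwise the old
 * contents are read in via logfs_readpage_nolock().
 */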
static int logfs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct page *page;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;
	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		return 0;
	}
	return logfs_readpage_nolock(page);
}

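/*
 * logfs_write_end - finish a buffered write
 *
 * A short copy into a page that was never brought uptodate is rejected
 * entirely (copied = 0) so that the caller retries the whole page.
 * Otherwise i_size is grown if the write extended the file, the page is
 * marked uptodate and either dirtied for later writeback or, if
 * get_page_reserve() fails, written out immediately via logfs_write_buf().
 * Returns the number of bytes accepted, or a negative error.
 */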
static int logfs_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied, struct page *page,
		void *fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = page->index;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned end = start + copied;
	int ret = 0;

	BUG_ON(PAGE_CACHE_SIZE != inode->i_sb->s_blocksize);
	BUG_ON(page->index > I3_BLOCKS);

	if (copied < len) {
		/*
		 * Short write of a non-initialized page.  Just tell userspace
		 * to retry the entire page.
		 */
		if (!PageUptodate(page)) {
			copied = 0;
			goto out;
		}
	}
	if (copied == 0)
		goto out; /* FIXME: do we need to update inode? */

	if (i_size_read(inode) < (index << PAGE_CACHE_SHIFT) + end) {
		i_size_write(inode, (index << PAGE_CACHE_SHIFT) + end);
		mark_inode_dirty_sync(inode);
	}

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		if (!get_page_reserve(inode, page))
			__set_page_dirty_nobuffers(page);
		else
			ret = logfs_write_buf(inode, page, WF_LOCK);
	}
out:
	unlock_page(page);
	page_cache_release(page);
	return ret ? ret : copied;
}

int logfs_readpage(struct file *file, struct page *page)
{
	int ret;

	ret = logfs_readpage_nolock(page);
	unlock_page(page);
	return ret;
}

/* Clear the page's dirty flag in the radix tree. */
/* TODO: mucking with PageWriteback is silly.  Add a generic function to clear
 * the dirty bit from the radix tree for filesystems that don't have to wait
 * for page writeback to finish (i.e. any compressing filesystem).
 */
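/*
 * Note: the set_page_writeback()/end_page_writeback() pair below relies on
 * the writeback core having already cleared PG_dirty (clear_page_dirty_for_io)
 * before ->writepage is called; set_page_writeback() then drops the radix-tree
 * dirty tag for a clean page, and end_page_writeback() immediately removes the
 * writeback state again, so nobody ever waits on it.
 */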
static void clear_radix_tree_dirty(struct page *page)
{
	BUG_ON(PagePrivate(page) || page->private);
	set_page_writeback(page);
	end_page_writeback(page);
}

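/*
 * Write the page out through logfs_write_buf().  On failure the page is
 * re-dirtied so writeback will retry it; on success the radix-tree dirty
 * tag is cleared.  The page is unlocked either way.
 */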
static int __logfs_writepage(struct page *page)
{
	struct inode *inode = page->mapping->host;
	int err;

	err = logfs_write_buf(inode, page, WF_LOCK);
	if (err)
		set_page_dirty(page);
	else
		clear_radix_tree_dirty(page);
	unlock_page(page);
	return err;
}

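/*
 * ->writepage: indirect blocks are written unconditionally; for data pages
 * the usual i_size checks apply - pages fully beyond i_size are skipped and
 * the page straddling i_size has its tail zeroed before being written.
 */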
static int logfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	u64 bix;
	level_t level;

	log_file("logfs_writepage(%lx, %lx, %p)\n", inode->i_ino, page->index,
			page);

	logfs_unpack_index(page->index, &bix, &level);

	/* Indirect blocks are never truncated */
	if (level != 0)
		return __logfs_writepage(page);

	/*
	 * TODO: everything below is a near-verbatim copy of nobh_writepage().
	 * The relevant bits should be factored out after logfs is merged.
	 */

	/* Is the page fully inside i_size? */
	if (bix < end_index)
		return __logfs_writepage(page);

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (bix > end_index || offset == 0) {
		unlock_page(page);
		return 0; /* don't care */
	}

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	return __logfs_writepage(page);
}

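/*
 * ->invalidatepage: if dirty-page bytes were reserved against this page,
 * return them to the superblock's s_dirty_pages accounting and free the
 * attached logfs_block; otherwise the page's block bookkeeping is handed
 * over to move_page_to_btree().
 */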
static void logfs_invalidatepage(struct page *page, unsigned long offset)
{
	struct logfs_block *block = logfs_block(page);

	if (block->reserved_bytes) {
		struct super_block *sb = page->mapping->host->i_sb;
		struct logfs_super *super = logfs_super(sb);

		super->s_dirty_pages -= block->reserved_bytes;
		block->ops->free_block(sb, block);
		BUG_ON(bitmap_weight(block->alias_map, LOGFS_BLOCK_FACTOR));
	} else
		move_page_to_btree(page);
	BUG_ON(PagePrivate(page) || page->private);
}

static int logfs_releasepage(struct page *page, gfp_t only_xfs_uses_this)
{
	return 0; /* None of these are easy to release */
}


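/*
 * logfs_ioctl - FS_IOC_GETFLAGS / FS_IOC_SETFLAGS
 *
 * Only the user-visible inode flags are reported, and only the
 * user-modifiable bits may be changed, by the file's owner (or a capable
 * caller) on a read-write filesystem.
 */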
long logfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct logfs_inode *li = logfs_inode(inode);
	unsigned int oldflags, flags;
	int err;

	switch (cmd) {
	case FS_IOC_GETFLAGS:
		flags = li->li_flags & LOGFS_FL_USER_VISIBLE;
		return put_user(flags, (int __user *)arg);
	case FS_IOC_SETFLAGS:
		if (IS_RDONLY(inode))
			return -EROFS;

		if (!is_owner_or_cap(inode))
			return -EACCES;

		err = get_user(flags, (int __user *)arg);
		if (err)
			return err;

		mutex_lock(&inode->i_mutex);
		oldflags = li->li_flags;
		flags &= LOGFS_FL_USER_MODIFIABLE;
		flags |= oldflags & ~LOGFS_FL_USER_MODIFIABLE;
		li->li_flags = flags;
		mutex_unlock(&inode->i_mutex);

		inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty_sync(inode);
		return 0;

	default:
		return -ENOTTY;
	}
}

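/*
 * ->fsync: logfs syncs per filesystem rather than per inode.  Writing the
 * anchor via logfs_write_anchor() is the filesystem-wide commit point; the
 * datasync flag makes no difference here.
 */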
int logfs_fsync(struct file *file, int datasync)
{
	struct super_block *sb = file->f_mapping->host->i_sb;

	logfs_write_anchor(sb);
	return 0;
}

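/*
 * ->setattr: size changes go through logfs_truncate(); all other valid
 * attributes are simply copied into the in-core inode, which is then
 * marked dirty.
 */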
static int logfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int err = 0;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		err = logfs_truncate(inode, attr->ia_size);
		if (err)
			return err;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

const struct inode_operations logfs_reg_iops = {
	.setattr	= logfs_setattr,
};

const struct file_operations logfs_reg_fops = {
	.aio_read	= generic_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.fsync		= logfs_fsync,
	.unlocked_ioctl	= logfs_ioctl,
	.llseek		= generic_file_llseek,
	.mmap		= generic_file_readonly_mmap,
	.open		= generic_file_open,
	.read		= do_sync_read,
	.write		= do_sync_write,
};

const struct address_space_operations logfs_reg_aops = {
	.invalidatepage	= logfs_invalidatepage,
	.readpage	= logfs_readpage,
	.releasepage	= logfs_releasepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.writepage	= logfs_writepage,
	.writepages	= generic_writepages,
	.write_begin	= logfs_write_begin,
	.write_end	= logfs_write_end,
};