/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA  94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */

#include "xfs.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_sb.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include <linux/mpage.h>
#include <linux/writeback.h>

STATIC void xfs_count_page_state(struct page *, int *, int *, int *);
STATIC void xfs_convert_page(struct inode *, struct page *, xfs_iomap_t *,
                struct writeback_control *wbc, void *, int, int);

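/*
 * Record page state (file sizes, page offset, delalloc/unmapped/unwritten
 * buffer state) in the inode's read/write trace buffer.  Compiles away to
 * a no-op when XFS_RW_TRACE is not configured.
 */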
#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
        int             tag,
        struct inode    *inode,
        struct page     *page,
        int             mask)
{
        xfs_inode_t     *ip;
        bhv_desc_t      *bdp;
        vnode_t         *vp = LINVFS_GET_VP(inode);
        loff_t          isize = i_size_read(inode);
        loff_t          offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
        int             delalloc = -1, unmapped = -1, unwritten = -1;

        if (page_has_buffers(page))
                xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

        bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
        ip = XFS_BHVTOI(bdp);
        if (!ip->i_rwtrace)
                return;

        ktrace_enter(ip->i_rwtrace,
                (void *)((unsigned long)tag),
                (void *)ip,
                (void *)inode,
                (void *)page,
                (void *)((unsigned long)mask),
                (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
                (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
                (void *)((unsigned long)((isize >> 32) & 0xffffffff)),
                (void *)((unsigned long)(isize & 0xffffffff)),
                (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
                (void *)((unsigned long)(offset & 0xffffffff)),
                (void *)((unsigned long)delalloc),
                (void *)((unsigned long)unmapped),
                (void *)((unsigned long)unwritten),
                (void *)NULL,
                (void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, mask)
#endif

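/*
 * I/O completion handler for buffer heads sitting over an unwritten
 * extent: drop a reference on the tracking pagebuf and, once the last
 * outstanding buffer completes, let its iodone function run the
 * unwritten->written extent conversion.
 */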
void
linvfs_unwritten_done(
        struct buffer_head      *bh,
        int                     uptodate)
{
        xfs_buf_t               *pb = (xfs_buf_t *)bh->b_private;

        ASSERT(buffer_unwritten(bh));
        bh->b_end_io = NULL;
        clear_buffer_unwritten(bh);
        if (!uptodate)
                pagebuf_ioerror(pb, EIO);
        if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
                pagebuf_iodone(pb, 1, 1);
        }
        end_buffer_async_write(bh, uptodate);
}

/*
 * Issue transactions to convert a buffer range from unwritten
 * to written extents (buffered IO).
 */
STATIC void
linvfs_unwritten_convert(
        xfs_buf_t       *bp)
{
        vnode_t         *vp = XFS_BUF_FSPRIVATE(bp, vnode_t *);
        int             error;

        BUG_ON(atomic_read(&bp->pb_hold) < 1);
        VOP_BMAP(vp, XFS_BUF_OFFSET(bp), XFS_BUF_SIZE(bp),
                        BMAPI_UNWRITTEN, NULL, NULL, error);
        XFS_BUF_SET_FSPRIVATE(bp, NULL);
        XFS_BUF_CLR_IODONE_FUNC(bp);
        XFS_BUF_UNDATAIO(bp);
        iput(LINVFS_GET_IP(vp));
        pagebuf_iodone(bp, 0, 0);
}

/*
 * Issue transactions to convert a buffer range from unwritten
 * to written extents (direct IO).
 */
STATIC void
linvfs_unwritten_convert_direct(
        struct inode    *inode,
        loff_t          offset,
        ssize_t         size,
        void            *private)
{
        ASSERT(!private || inode == (struct inode *)private);

        /* private indicates an unwritten extent lay beneath this IO */
        if (private && size > 0) {
                vnode_t *vp = LINVFS_GET_VP(inode);
                int     error;

                VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
        }
}

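/*
 * Ask the filesystem for the extent mapping covering the byte range
 * starting at @offset for @count bytes.  The result is returned in
 * *mapp; a negative errno is returned on failure.
 */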
STATIC int
xfs_map_blocks(
        struct inode            *inode,
        loff_t                  offset,
        ssize_t                 count,
        xfs_iomap_t             *mapp,
        int                     flags)
{
        vnode_t                 *vp = LINVFS_GET_VP(inode);
        int                     error, nmaps = 1;

        VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error);
        if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
                VMODIFY(vp);
        return -error;
}

/*
 * Return @iomapp if it covers the given @offset within a @page,
 * or NULL if the offset lies outside the mapped range.
 */
STATIC xfs_iomap_t *
xfs_offset_to_map(
        struct page             *page,
        xfs_iomap_t             *iomapp,
        unsigned long           offset)
{
        loff_t                  full_offset;    /* offset from start of file */

        ASSERT(offset < PAGE_CACHE_SIZE);

        full_offset = page->index;              /* NB: using 64bit number */
        full_offset <<= PAGE_CACHE_SHIFT;       /* offset from file start */
        full_offset += offset;                  /* offset from page start */

        if (full_offset < iomapp->iomap_offset)
                return NULL;
        if (iomapp->iomap_offset + (iomapp->iomap_bsize - 1) >= full_offset)
                return iomapp;
        return NULL;
}

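/*
 * Fill a buffer_head in with the on-disk mapping for the given offset
 * in the page: compute the block number from the iomap, point the
 * buffer at the right block device, and mark it mapped (clearing any
 * delalloc state).
 */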
STATIC void
xfs_map_at_offset(
        struct page             *page,
        struct buffer_head      *bh,
        unsigned long           offset,
        int                     block_bits,
        xfs_iomap_t             *iomapp)
{
        xfs_daddr_t             bn;
        loff_t                  delta;
        int                     sector_shift;

        ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
        ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
        ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);

        delta = page->index;
        delta <<= PAGE_CACHE_SHIFT;
        delta += offset;
        delta -= iomapp->iomap_offset;
        delta >>= block_bits;

        sector_shift = block_bits - BBSHIFT;
        bn = iomapp->iomap_bn >> sector_shift;
        bn += delta;
        BUG_ON(!bn && !(iomapp->iomap_flags & IOMAP_REALTIME));
        ASSERT((bn << sector_shift) >= iomapp->iomap_bn);

        lock_buffer(bh);
        bh->b_blocknr = bn;
        bh->b_bdev = iomapp->iomap_target->pbr_bdev;
        set_buffer_mapped(bh);
        clear_buffer_delay(bh);
}

/*
 * Look for a page at index which is unlocked and contains our
 * unwritten extent flagged buffers at its head.  Returns page
 * locked and with an extra reference count, and length of the
 * unwritten extent component on this page that we can write,
 * in units of filesystem blocks.
 */
STATIC struct page *
xfs_probe_unwritten_page(
        struct address_space    *mapping,
        pgoff_t                 index,
        xfs_iomap_t             *iomapp,
        xfs_buf_t               *pb,
        unsigned long           max_offset,
        unsigned long           *fsbs,
        unsigned int            bbits)
{
        struct page             *page;

        page = find_trylock_page(mapping, index);
        if (!page)
                return NULL;
        if (PageWriteback(page))
                goto out;

        if (page->mapping && page_has_buffers(page)) {
                struct buffer_head      *bh, *head;
                unsigned long           p_offset = 0;

                *fsbs = 0;
                bh = head = page_buffers(page);
                do {
                        if (!buffer_unwritten(bh) || !buffer_uptodate(bh))
                                break;
                        if (!xfs_offset_to_map(page, iomapp, p_offset))
                                break;
                        if (p_offset >= max_offset)
                                break;
                        xfs_map_at_offset(page, bh, p_offset, bbits, iomapp);
                        set_buffer_unwritten_io(bh);
                        bh->b_private = pb;
                        p_offset += bh->b_size;
                        (*fsbs)++;
                } while ((bh = bh->b_this_page) != head);

                if (p_offset)
                        return page;
        }

out:
        unlock_page(page);
        return NULL;
}

/*
 * Look for a page at index which is unlocked and not mapped
 * yet - clustering for mmap write case.
 */
STATIC unsigned int
xfs_probe_unmapped_page(
        struct address_space    *mapping,
        pgoff_t                 index,
        unsigned int            pg_offset)
{
        struct page             *page;
        int                     ret = 0;

        page = find_trylock_page(mapping, index);
        if (!page)
                return 0;
        if (PageWriteback(page))
                goto out;

        if (page->mapping && PageDirty(page)) {
                if (page_has_buffers(page)) {
                        struct buffer_head      *bh, *head;

                        bh = head = page_buffers(page);
                        do {
                                if (buffer_mapped(bh) || !buffer_uptodate(bh))
                                        break;
                                ret += bh->b_size;
                                if (ret >= pg_offset)
                                        break;
                        } while ((bh = bh->b_this_page) != head);
                } else
                        ret = PAGE_CACHE_SIZE;
        }

out:
        unlock_page(page);
        return ret;
}

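/*
 * Starting from @bh in @startpage, count the bytes of contiguous
 * unmapped buffers, continuing into following dirty pages (limited to
 * the next 64 page indices and to EOF), so the caller can size a
 * single mapping request for the whole cluster.
 */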
STATIC unsigned int
xfs_probe_unmapped_cluster(
        struct inode            *inode,
        struct page             *startpage,
        struct buffer_head      *bh,
        struct buffer_head      *head)
{
        pgoff_t                 tindex, tlast, tloff;
        unsigned int            pg_offset, len, total = 0;
        struct address_space    *mapping = inode->i_mapping;

        /* First sum forwards in this page */
        do {
                if (buffer_mapped(bh))
                        break;
                total += bh->b_size;
        } while ((bh = bh->b_this_page) != head);

        /* If we reached the end of the page, sum forwards in
         * following pages.
         */
        if (bh == head) {
                tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
                /* Prune this back to avoid pathological behavior */
                tloff = min(tlast, startpage->index + 64);
                for (tindex = startpage->index + 1; tindex < tloff; tindex++) {
                        len = xfs_probe_unmapped_page(mapping, tindex,
                                                        PAGE_CACHE_SIZE);
                        if (!len)
                                return total;
                        total += len;
                }
                if (tindex == tlast &&
                    (pg_offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
                        total += xfs_probe_unmapped_page(mapping,
                                                        tindex, pg_offset);
                }
        }
        return total;
}

/*
 * Probe for a given page (index) in the inode and test if it is delayed
 * and without unwritten buffers.  Returns page locked and with an extra
 * reference count.
 */
STATIC struct page *
xfs_probe_delalloc_page(
        struct inode            *inode,
        pgoff_t                 index)
{
        struct page             *page;

        page = find_trylock_page(inode->i_mapping, index);
        if (!page)
                return NULL;
        if (PageWriteback(page))
                goto out;

        if (page->mapping && page_has_buffers(page)) {
                struct buffer_head      *bh, *head;
                int                     acceptable = 0;

                bh = head = page_buffers(page);
                do {
                        if (buffer_unwritten(bh)) {
                                acceptable = 0;
                                break;
                        } else if (buffer_delay(bh)) {
                                acceptable = 1;
                        }
                } while ((bh = bh->b_this_page) != head);

                if (acceptable)
                        return page;
        }

out:
        unlock_page(page);
        return NULL;
}

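/*
 * Map the run of unwritten buffers starting at @curr/@p_offset to disk,
 * set up an "empty" pagebuf to track completion of the whole range
 * (including any following pages covered by the same extent), and
 * arrange for linvfs_unwritten_convert to run once all the buffer I/O
 * has completed.
 */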
STATIC int
xfs_map_unwritten(
        struct inode            *inode,
        struct page             *start_page,
        struct buffer_head      *head,
        struct buffer_head      *curr,
        unsigned long           p_offset,
        int                     block_bits,
        xfs_iomap_t             *iomapp,
        struct writeback_control *wbc,
        int                     startio,
        int                     all_bh)
{
        struct buffer_head      *bh = curr;
        xfs_iomap_t             *tmp;
        xfs_buf_t               *pb;
        loff_t                  offset, size;
        unsigned long           nblocks = 0;

        offset = start_page->index;
        offset <<= PAGE_CACHE_SHIFT;
        offset += p_offset;

        /* get an "empty" pagebuf to manage IO completion
         * Proper values will be set before returning */
        pb = pagebuf_lookup(iomapp->iomap_target, 0, 0, 0);
        if (!pb)
                return -EAGAIN;

        /* Take a reference to the inode to prevent it from
         * being reclaimed while we have outstanding unwritten
         * extent IO on it.
         */
        if ((igrab(inode)) != inode) {
                pagebuf_free(pb);
                return -EAGAIN;
        }

        /* Set the count to 1 initially, this will stop an I/O
         * completion callout which happens before we have started
         * all the I/O from calling pagebuf_iodone too early.
         */
        atomic_set(&pb->pb_io_remaining, 1);

        /* First map forwards in the page consecutive buffers
         * covering this unwritten extent
         */
        do {
                if (!buffer_unwritten(bh))
                        break;
                tmp = xfs_offset_to_map(start_page, iomapp, p_offset);
                if (!tmp)
                        break;
                xfs_map_at_offset(start_page, bh, p_offset, block_bits, iomapp);
                set_buffer_unwritten_io(bh);
                bh->b_private = pb;
                p_offset += bh->b_size;
                nblocks++;
        } while ((bh = bh->b_this_page) != head);

        atomic_add(nblocks, &pb->pb_io_remaining);

        /* If we reached the end of the page, map forwards in any
         * following pages which are also covered by this extent.
         */
        if (bh == head) {
                struct address_space    *mapping = inode->i_mapping;
                pgoff_t                 tindex, tloff, tlast;
                unsigned long           bs;
                unsigned int            pg_offset, bbits = inode->i_blkbits;
                struct page             *page;

                tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
                tloff = (iomapp->iomap_offset + iomapp->iomap_bsize) >> PAGE_CACHE_SHIFT;
                tloff = min(tlast, tloff);
                for (tindex = start_page->index + 1; tindex < tloff; tindex++) {
                        page = xfs_probe_unwritten_page(mapping,
                                                tindex, iomapp, pb,
                                                PAGE_CACHE_SIZE, &bs, bbits);
                        if (!page)
                                break;
                        nblocks += bs;
                        atomic_add(bs, &pb->pb_io_remaining);
                        xfs_convert_page(inode, page, iomapp, wbc, pb,
                                        startio, all_bh);
                        /* stop if converting the next page might add
                         * enough blocks that the corresponding byte
                         * count won't fit in our ulong page buf length */
                        if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
                                goto enough;
                }

                if (tindex == tlast &&
                    (pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) {
                        page = xfs_probe_unwritten_page(mapping,
                                                        tindex, iomapp, pb,
                                                        pg_offset, &bs, bbits);
                        if (page) {
                                nblocks += bs;
                                atomic_add(bs, &pb->pb_io_remaining);
                                xfs_convert_page(inode, page, iomapp, wbc, pb,
                                                startio, all_bh);
                                if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
                                        goto enough;
                        }
                }
        }

enough:
        size = nblocks;         /* NB: using 64bit number here */
        size <<= block_bits;    /* convert fsb's to byte range */

        XFS_BUF_DATAIO(pb);
        XFS_BUF_ASYNC(pb);
        XFS_BUF_SET_SIZE(pb, size);
        XFS_BUF_SET_COUNT(pb, size);
        XFS_BUF_SET_OFFSET(pb, offset);
        XFS_BUF_SET_FSPRIVATE(pb, LINVFS_GET_VP(inode));
        XFS_BUF_SET_IODONE_FUNC(pb, linvfs_unwritten_convert);

        if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
                pagebuf_iodone(pb, 1, 1);
        }

        return 0;
}

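/*
 * Start writeback on a page: mark it under writeback, clear the dirty
 * bit if requested, and submit the locked buffers collected in @bh_arr.
 * If no buffers were handed in, writeback on the page is ended
 * immediately and it is counted as skipped.
 */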
STATIC void
xfs_submit_page(
        struct page             *page,
        struct writeback_control *wbc,
        struct buffer_head      *bh_arr[],
        int                     bh_count,
        int                     probed_page,
        int                     clear_dirty)
{
        struct buffer_head      *bh;
        int                     i;

        BUG_ON(PageWriteback(page));
        set_page_writeback(page);
        if (clear_dirty)
                clear_page_dirty(page);
        unlock_page(page);

        if (bh_count) {
                for (i = 0; i < bh_count; i++) {
                        bh = bh_arr[i];
                        mark_buffer_async_write(bh);
                        if (buffer_unwritten(bh))
                                set_buffer_unwritten_io(bh);
                        set_buffer_uptodate(bh);
                        clear_buffer_dirty(bh);
                }

                for (i = 0; i < bh_count; i++)
                        submit_bh(WRITE, bh_arr[i]);

                if (probed_page && clear_dirty)
                        wbc->nr_to_write--;     /* Wrote an "extra" page */
        } else {
                end_page_writeback(page);
                wbc->pages_skipped++;   /* We didn't write this page */
        }
}

/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC void
xfs_convert_page(
        struct inode            *inode,
        struct page             *page,
        xfs_iomap_t             *iomapp,
        struct writeback_control *wbc,
        void                    *private,
        int                     startio,
        int                     all_bh)
{
        struct buffer_head      *bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
        xfs_iomap_t             *mp = iomapp, *tmp;
        unsigned long           end, offset;
        pgoff_t                 end_index;
        int                     i = 0, index = 0;
        int                     bbits = inode->i_blkbits;

        end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
        if (page->index < end_index) {
                end = PAGE_CACHE_SIZE;
        } else {
                end = i_size_read(inode) & (PAGE_CACHE_SIZE-1);
        }
        bh = head = page_buffers(page);
        do {
                offset = i << bbits;
                if (offset >= end)
                        break;
                if (!(PageUptodate(page) || buffer_uptodate(bh)))
                        continue;
                if (buffer_mapped(bh) && all_bh &&
                    !(buffer_unwritten(bh) || buffer_delay(bh))) {
                        if (startio) {
                                lock_buffer(bh);
                                bh_arr[index++] = bh;
                        }
                        continue;
                }
                tmp = xfs_offset_to_map(page, mp, offset);
                if (!tmp)
                        continue;
                ASSERT(!(tmp->iomap_flags & IOMAP_HOLE));
                ASSERT(!(tmp->iomap_flags & IOMAP_DELAY));

                /* If this is a new unwritten extent buffer (i.e. one
                 * that we haven't passed in private data for), we must
                 * now map this buffer too.
                 */
                if (buffer_unwritten(bh) && !bh->b_end_io) {
                        ASSERT(tmp->iomap_flags & IOMAP_UNWRITTEN);
                        xfs_map_unwritten(inode, page, head, bh, offset,
                                        bbits, tmp, wbc, startio, all_bh);
                } else if (! (buffer_unwritten(bh) && buffer_locked(bh))) {
                        xfs_map_at_offset(page, bh, offset, bbits, tmp);
                        if (buffer_unwritten(bh)) {
                                set_buffer_unwritten_io(bh);
                                bh->b_private = private;
                                ASSERT(private);
                        }
                }
                if (startio) {
                        bh_arr[index++] = bh;
                } else {
                        set_buffer_dirty(bh);
                        unlock_buffer(bh);
                        mark_buffer_dirty(bh);
                }
        } while (i++, (bh = bh->b_this_page) != head);

        if (startio) {
                xfs_submit_page(page, wbc, bh_arr, index, 1, index == i);
        } else {
                unlock_page(page);
        }
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
        struct inode            *inode,
        pgoff_t                 tindex,
        xfs_iomap_t             *iomapp,
        struct writeback_control *wbc,
        int                     startio,
        int                     all_bh,
        pgoff_t                 tlast)
{
        struct page             *page;

        for (; tindex <= tlast; tindex++) {
                page = xfs_probe_delalloc_page(inode, tindex);
                if (!page)
                        break;
                xfs_convert_page(inode, page, iomapp, wbc, NULL,
                                startio, all_bh);
        }
}

/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know if any of the blocks, or which block for
 * that matter, are dirty due to mmap writes, and therefore bh uptodate is
 * only valid if the page itself isn't completely uptodate.  Some layers
 * may clear the page dirty flag prior to calling write page, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
 * written out.  Note: in the case of a page that has been dirtied by
 * mmap write but only partially set up by block_prepare_write, the
 * bh->b_state's will not agree and only the ones set up by BPW/BCW will
 * have valid state; thus the whole page must be written out.
 */

STATIC int
xfs_page_state_convert(
        struct inode    *inode,
        struct page     *page,
        struct writeback_control *wbc,
        int             startio,
        int             unmapped) /* also implies page uptodate */
{
        struct buffer_head      *bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
        xfs_iomap_t             *iomp, iomap;
        loff_t                  offset;
        unsigned long           p_offset = 0;
        __uint64_t              end_offset;
        pgoff_t                 end_index, last_index, tlast;
        int                     len, err, i, cnt = 0, uptodate = 1;
        int                     flags = startio ? 0 : BMAPI_TRYLOCK;
        int                     page_dirty, delalloc = 0;

        /* Is this page beyond the end of the file? */
        offset = i_size_read(inode);
        end_index = offset >> PAGE_CACHE_SHIFT;
        last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
        if (page->index >= end_index) {
                if ((page->index >= end_index + 1) ||
                    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
                        err = -EIO;
                        goto error;
                }
        }

        offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
        end_offset = min_t(unsigned long long,
                        offset + PAGE_CACHE_SIZE, i_size_read(inode));

        bh = head = page_buffers(page);
        iomp = NULL;

        /*
         * page_dirty is initially a count of buffers on the page and
         * is decremented as we move each into a cleanable state.
         */
        len = bh->b_size;
        page_dirty = PAGE_CACHE_SIZE / len;

        do {
                if (offset >= end_offset)
                        break;
                if (!buffer_uptodate(bh))
                        uptodate = 0;
                if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio)
                        continue;

                if (iomp) {
                        iomp = xfs_offset_to_map(page, &iomap, p_offset);
                }

                /*
                 * First case, map an unwritten extent and prepare for
                 * extent state conversion transaction on completion.
                 */
                if (buffer_unwritten(bh)) {
                        if (!startio)
                                continue;
                        if (!iomp) {
                                err = xfs_map_blocks(inode, offset, len, &iomap,
                                                BMAPI_READ|BMAPI_IGNSTATE);
                                if (err) {
                                        goto error;
                                }
                                iomp = xfs_offset_to_map(page, &iomap,
                                                                p_offset);
                        }
                        if (iomp) {
                                if (!bh->b_end_io) {
                                        err = xfs_map_unwritten(inode, page,
                                                        head, bh, p_offset,
                                                        inode->i_blkbits, iomp,
                                                        wbc, startio, unmapped);
                                        if (err) {
                                                goto error;
                                        }
                                } else {
                                        set_bit(BH_Lock, &bh->b_state);
                                }
                                BUG_ON(!buffer_locked(bh));
                                bh_arr[cnt++] = bh;
                                page_dirty--;
                        }
                /*
                 * Second case, allocate space for a delalloc buffer.
                 * We can return EAGAIN here in the release page case.
                 */
                } else if (buffer_delay(bh)) {
                        if (!iomp) {
                                delalloc = 1;
                                err = xfs_map_blocks(inode, offset, len, &iomap,
                                                BMAPI_ALLOCATE | flags);
                                if (err) {
                                        goto error;
                                }
                                iomp = xfs_offset_to_map(page, &iomap,
                                                                p_offset);
                        }
                        if (iomp) {
                                xfs_map_at_offset(page, bh, p_offset,
                                                inode->i_blkbits, iomp);
                                if (startio) {
                                        bh_arr[cnt++] = bh;
                                } else {
                                        set_buffer_dirty(bh);
                                        unlock_buffer(bh);
                                        mark_buffer_dirty(bh);
                                }
                                page_dirty--;
                        }
                } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
                           (unmapped || startio)) {

                        if (!buffer_mapped(bh)) {
                                int     size;

                                /*
                                 * Getting here implies an unmapped buffer
                                 * was found, and we are in a path where we
                                 * need to write the whole page out.
                                 */
                                if (!iomp) {
                                        size = xfs_probe_unmapped_cluster(
                                                        inode, page, bh, head);
                                        err = xfs_map_blocks(inode, offset,
                                                        size, &iomap,
                                                        BMAPI_WRITE|BMAPI_MMAP);
                                        if (err) {
                                                goto error;
                                        }
                                        iomp = xfs_offset_to_map(page, &iomap,
                                                                     p_offset);
                                }
                                if (iomp) {
                                        xfs_map_at_offset(page,
                                                        bh, p_offset,
                                                        inode->i_blkbits, iomp);
                                        if (startio) {
                                                bh_arr[cnt++] = bh;
                                        } else {
                                                set_buffer_dirty(bh);
                                                unlock_buffer(bh);
                                                mark_buffer_dirty(bh);
                                        }
                                        page_dirty--;
                                }
                        } else if (startio) {
                                if (buffer_uptodate(bh) &&
                                    !test_and_set_bit(BH_Lock, &bh->b_state)) {
                                        bh_arr[cnt++] = bh;
                                        page_dirty--;
                                }
                        }
                }
        } while (offset += len, p_offset += len,
                 ((bh = bh->b_this_page) != head));

        if (uptodate && bh == head)
                SetPageUptodate(page);

        if (startio)
                xfs_submit_page(page, wbc, bh_arr, cnt, 0, 1);

        if (iomp) {
                tlast = (iomp->iomap_offset + iomp->iomap_bsize - 1) >>
                                        PAGE_CACHE_SHIFT;
                if (delalloc && (tlast > last_index))
                        tlast = last_index;
                xfs_cluster_write(inode, page->index + 1, iomp, wbc,
                                startio, unmapped, tlast);
        }

        return page_dirty;

error:
        for (i = 0; i < cnt; i++) {
                unlock_buffer(bh_arr[i]);
        }

        /*
         * If it's delalloc and we have nowhere to put it,
         * throw it away, unless the lower layers told
         * us to try again.
         */
        if (err != -EAGAIN) {
                if (!unmapped) {
                        block_invalidatepage(page, 0);
                }
                ClearPageUptodate(page);
        }
        return err;
}

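/*
 * Common get_block implementation shared by the buffered and direct I/O
 * paths: map @iblock (for @blocks blocks, or a single block if zero)
 * through VOP_BMAP and fill @bh_result with the disk block, the target
 * block device, and the appropriate new/unwritten/delay state bits.
 */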
STATIC int
__linvfs_get_block(
        struct inode            *inode,
        sector_t                iblock,
        unsigned long           blocks,
        struct buffer_head      *bh_result,
        int                     create,
        int                     direct,
        bmapi_flags_t           flags)
{
        vnode_t                 *vp = LINVFS_GET_VP(inode);
        xfs_iomap_t             iomap;
        int                     retpbbm = 1;
        int                     error;
        ssize_t                 size;
        loff_t                  offset = (loff_t)iblock << inode->i_blkbits;

        if (blocks)
                size = blocks << inode->i_blkbits;
        else
                size = 1 << inode->i_blkbits;

        VOP_BMAP(vp, offset, size,
                create ? flags : BMAPI_READ, &iomap, &retpbbm, error);
        if (error)
                return -error;

        if (retpbbm == 0)
                return 0;

        if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
                xfs_daddr_t             bn;
                loff_t                  delta;

                /* For unwritten extents do not report a disk address on
                 * the read case (treat as if we're reading into a hole).
                 */
                if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
                        delta = offset - iomap.iomap_offset;
                        delta >>= inode->i_blkbits;

                        bn = iomap.iomap_bn >> (inode->i_blkbits - BBSHIFT);
                        bn += delta;
                        BUG_ON(!bn && !(iomap.iomap_flags & IOMAP_REALTIME));
                        bh_result->b_blocknr = bn;
                        set_buffer_mapped(bh_result);
                }
                if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
                        if (direct)
                                bh_result->b_private = inode;
                        set_buffer_unwritten(bh_result);
                        set_buffer_delay(bh_result);
                }
        }

        /* If this is a realtime file, data might be on a new device */
        bh_result->b_bdev = iomap.iomap_target->pbr_bdev;

        /* If we previously allocated a block out beyond eof and
         * we are now coming back to use it then we will need to
         * flag it as new even if it has a disk address.
         */
        if (create &&
            ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
             (offset >= i_size_read(inode)) || (iomap.iomap_flags & IOMAP_NEW))) {
                set_buffer_new(bh_result);
        }

        if (iomap.iomap_flags & IOMAP_DELAY) {
                BUG_ON(direct);
                if (create) {
                        set_buffer_uptodate(bh_result);
                        set_buffer_mapped(bh_result);
                        set_buffer_delay(bh_result);
                }
        }

        if (blocks) {
                bh_result->b_size = (ssize_t)min(
                        (loff_t)(iomap.iomap_bsize - iomap.iomap_delta),
                        (loff_t)(blocks << inode->i_blkbits));
        }

        return 0;
}

int
linvfs_get_block(
        struct inode            *inode,
        sector_t                iblock,
        struct buffer_head      *bh_result,
        int                     create)
{
        return __linvfs_get_block(inode, iblock, 0, bh_result,
                                        create, 0, BMAPI_WRITE);
}

STATIC int
linvfs_get_blocks_direct(
        struct inode            *inode,
        sector_t                iblock,
        unsigned long           max_blocks,
        struct buffer_head      *bh_result,
        int                     create)
{
        return __linvfs_get_block(inode, iblock, max_blocks, bh_result,
                                        create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}

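/*
 * ->direct_IO: look up the target block device for this file via
 * VOP_BMAP(BMAPI_DEVICE), then hand the request to the generic blockdev
 * direct I/O code, with linvfs_unwritten_convert_direct run at I/O end
 * to convert any unwritten extents that were written into.
 */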
STATIC ssize_t
linvfs_direct_IO(
        int                     rw,
        struct kiocb            *iocb,
        const struct iovec      *iov,
        loff_t                  offset,
        unsigned long           nr_segs)
{
        struct file     *file = iocb->ki_filp;
        struct inode    *inode = file->f_mapping->host;
        vnode_t         *vp = LINVFS_GET_VP(inode);
        xfs_iomap_t     iomap;
        int             maps = 1;
        int             error;

        VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);
        if (error)
                return -error;

        return blockdev_direct_IO_own_locking(rw, iocb, inode,
                iomap.iomap_target->pbr_bdev,
                iov, offset, nr_segs,
                linvfs_get_blocks_direct,
                linvfs_unwritten_convert_direct);
}


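/*
 * ->bmap: flush any delayed-allocate data under the iolock first so
 * that generic_block_bmap reports real disk blocks (needed for FIBMAP
 * and friends).
 */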
STATIC sector_t
linvfs_bmap(
        struct address_space    *mapping,
        sector_t                block)
{
        struct inode            *inode = (struct inode *)mapping->host;
        vnode_t                 *vp = LINVFS_GET_VP(inode);
        int                     error;

        vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address);

        VOP_RWLOCK(vp, VRWLOCK_READ);
        VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
        VOP_RWUNLOCK(vp, VRWLOCK_READ);
        return generic_block_bmap(mapping, block, linvfs_get_block);
}

STATIC int
linvfs_readpage(
        struct file             *unused,
        struct page             *page)
{
        return mpage_readpage(page, linvfs_get_block);
}

STATIC int
linvfs_readpages(
        struct file             *unused,
        struct address_space    *mapping,
        struct list_head        *pages,
        unsigned                nr_pages)
{
        return mpage_readpages(mapping, pages, nr_pages, linvfs_get_block);
}

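/*
 * Walk the buffers on a page and report whether any are delayed
 * allocate, uptodate-but-unmapped, or unwritten.  Unwritten flags on
 * buffers that carry no delalloc state are cleared rather than counted.
 */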
STATIC void
xfs_count_page_state(
        struct page             *page,
        int                     *delalloc,
        int                     *unmapped,
        int                     *unwritten)
{
        struct buffer_head      *bh, *head;

        *delalloc = *unmapped = *unwritten = 0;

        bh = head = page_buffers(page);
        do {
                if (buffer_uptodate(bh) && !buffer_mapped(bh))
                        (*unmapped) = 1;
                else if (buffer_unwritten(bh) && !buffer_delay(bh))
                        clear_buffer_unwritten(bh);
                else if (buffer_unwritten(bh))
                        (*unwritten) = 1;
                else if (buffer_delay(bh))
                        (*delalloc) = 1;
        } while ((bh = bh->b_this_page) != head);
}


/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it. For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate. For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first, if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */

STATIC int
linvfs_writepage(
        struct page             *page,
        struct writeback_control *wbc)
{
        int                     error;
        int                     need_trans;
        int                     delalloc, unmapped, unwritten;
        struct inode            *inode = page->mapping->host;

        xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

        /*
         * We need a transaction if:
         *  1. There are delalloc buffers on the page
         *  2. The page is uptodate and we have unmapped buffers
         *  3. The page is uptodate and we have no buffers
         *  4. There are unwritten buffers on the page
         */

        if (!page_has_buffers(page)) {
                unmapped = 1;
                need_trans = 1;
        } else {
                xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
                if (!PageUptodate(page))
                        unmapped = 0;
                need_trans = delalloc + unmapped + unwritten;
        }

        /*
         * If we need a transaction and the process flags say
         * we are already in a transaction, or no IO is allowed
         * then mark the page dirty again and leave the page
         * as is.
         */
        if (PFLAGS_TEST_FSTRANS() && need_trans)
                goto out_fail;

        /*
         * Delay hooking up buffer heads until we have
         * made our go/no-go decision.
         */
        if (!page_has_buffers(page))
                create_empty_buffers(page, 1 << inode->i_blkbits, 0);

        /*
         * Convert delayed allocate, unwritten or unmapped space
         * to real space and flush out to disk.
         */
        error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
        if (error == -EAGAIN)
                goto out_fail;
        if (unlikely(error < 0))
                goto out_unlock;

        return 0;

out_fail:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
out_unlock:
        unlock_page(page);
        return error;
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. Possibly the page is already clean. We always
 * have buffer heads in this call.
 *
 * Returns non-zero if the page's buffers were freed and it is safe
 * to release, 0 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O. buffer heads will be dirty and possibly
 *    delalloc. If no delalloc buffer heads in this case then we
 *    can just try to free the buffer heads.
 *
 * 2. We are called to release a page which has been written via
 *    mmap, all we need to do is ensure there is no delalloc
 *    state in the buffer heads; if there is none we can let the
 *    caller free them and we should come back later via writepage.
 */
STATIC int
linvfs_release_page(
        struct page             *page,
        int                     gfp_mask)
{
        struct inode            *inode = page->mapping->host;
        int                     dirty, delalloc, unmapped, unwritten;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 1,
        };

        xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask);

        xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
        if (!delalloc && !unwritten)
                goto free_buffers;

        if (!(gfp_mask & __GFP_FS))
                return 0;

        /* If we are already inside a transaction or the thread cannot
         * do I/O, we cannot release this page.
         */
        if (PFLAGS_TEST_FSTRANS())
                return 0;

        /*
         * Convert delalloc space to real space, do not flush the
         * data out to disk, that will be done by the caller.
         * Never need to allocate space here - we will always
         * come back to writepage in that case.
         */
        dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
        if (dirty == 0 && !unwritten)
                goto free_buffers;
        return 0;

free_buffers:
        return try_to_free_buffers(page);
}

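/*
 * ->prepare_write: thin wrapper around the generic block helper, using
 * linvfs_get_block to map or reserve the blocks backing the byte range
 * being written.
 */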
STATIC int
linvfs_prepare_write(
        struct file             *file,
        struct page             *page,
        unsigned int            from,
        unsigned int            to)
{
        return block_prepare_write(page, from, to, linvfs_get_block);
}

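/*
 * The address_space_operations vector hooked up to XFS inodes by the
 * linvfs glue layer.
 */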
struct address_space_operations linvfs_aops = {
        .readpage               = linvfs_readpage,
        .readpages              = linvfs_readpages,
        .writepage              = linvfs_writepage,
        .sync_page              = block_sync_page,
        .releasepage            = linvfs_release_page,
        .prepare_write          = linvfs_prepare_write,
        .commit_write           = generic_commit_write,
        .bmap                   = linvfs_bmap,
        .direct_IO              = linvfs_direct_IO,
};