/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

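/*
 * Walk the buffers on a page and report whether any of them are
 * delalloc, unmapped or unwritten.  Each flag is set if at least one
 * buffer on the page is in that state; these are yes/no indicators,
 * not counts.
 */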
STATIC void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unmapped,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unmapped = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))
			(*unmapped) = 1;
		else if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
	int		tag,
	struct inode	*inode,
	struct page	*page,
	unsigned long	pgoff)
{
	xfs_inode_t	*ip;
	loff_t		isize = i_size_read(inode);
	loff_t		offset = page_offset(page);
	int		delalloc = -1, unmapped = -1, unwritten = -1;

	if (page_has_buffers(page))
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	ip = XFS_I(inode);
	if (!ip->i_rwtrace)
		return;

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)inode,
		(void *)page,
		(void *)pgoff,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
		(void *)((unsigned long)(isize & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)delalloc),
		(void *)((unsigned long)unmapped),
		(void *)((unsigned long)unwritten),
		(void *)((unsigned long)current_pid()),
		(void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, pgoff)
#endif

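/*
 * Return the block device backing this inode: the realtime device
 * for realtime inodes, the data device for everything else.
 */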
STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

/*
 * Schedule IO completion handling on an xfsdatad if this was
 * the final hold on this ioend.  If we are asked to wait,
 * flush the workqueue.
 */
STATIC void
xfs_finish_ioend(
	xfs_ioend_t	*ioend,
	int		wait)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		queue_work(xfsdatad_workqueue, &ioend->io_work);
		if (wait)
			flush_workqueue(xfsdatad_workqueue);
	}
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	/*
	 * Volume managers supporting multiple paths can send back ENODEV
	 * when the final path disappears.  In this case continuing to fill
	 * the page cache with dirty data which cannot be written out is
	 * evil, so prevent that.
	 */
	if (unlikely(ioend->io_error == -ENODEV)) {
		xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ,
				      __FILE__, __LINE__);
	}

	vn_iowake(ip);
	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * Update the on-disk file size now that data has been written to disk.
 * The current in-memory file size is i_size.  If a write is beyond
 * EOF, i_new_size will be the intended file size until i_size is
 * updated.  If this write does not extend all the way to the valid
 * file size then restrict this update to the end of the write.
 */
STATIC void
xfs_setfilesize(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;
	xfs_fsize_t		bsize;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(ioend->io_type != IOMAP_READ);

	if (unlikely(ioend->io_error))
		return;

	bsize = ioend->io_offset + ioend->io_size;

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	isize = MAX(ip->i_size, ip->i_new_size);
	isize = MIN(isize, bsize);

	if (ip->i_d.di_size < isize) {
		ip->i_d.di_size = isize;
		ip->i_update_core = 1;
		ip->i_update_size = 1;
		xfs_mark_inode_dirty_sync(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

/*
 * Buffered IO write completion for delayed allocate extents.
 */
STATIC void
xfs_end_bio_delalloc(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * Buffered IO write completion for regular, written extents.
 */
STATIC void
xfs_end_bio_written(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * IO write completion for unwritten extents.
 *
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
STATIC void
xfs_end_bio_unwritten(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;

	if (likely(!ioend->io_error)) {
		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
			int error;
			error = xfs_iomap_write_unwritten(ip, offset, size);
			if (error)
				ioend->io_error = error;
		}
		xfs_setfilesize(ioend);
	}
	xfs_destroy_ioend(ioend);
}

/*
 * IO read completion for regular, written extents.
 */
STATIC void
xfs_end_bio_read(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_destroy_ioend(ioend);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, so that an I/O completion
	 * callback which fires before we have started all the I/O
	 * cannot call the completion routine too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
	ioend->io_offset = 0;
	ioend->io_size = 0;

	if (type == IOMAP_UNWRITTEN)
		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
	else if (type == IOMAP_DELAY)
		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
	else if (type == IOMAP_READ)
		INIT_WORK(&ioend->io_work, xfs_end_bio_read);
	else
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);

	return ioend;
}

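/*
 * Ask the filesystem for a single extent mapping covering offset/count.
 * The result is returned in *mapp; note the sign flip, so a negative
 * errno is returned on failure.
 */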
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	ssize_t			count,
	xfs_iomap_t		*mapp,
	int			flags)
{
	int			nmaps = 1;

	return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps);
}

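/*
 * Return true if the given file offset falls inside the range covered
 * by the cached iomap.
 */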
STATIC_INLINE int
xfs_iomap_valid(
	xfs_iomap_t		*iomapp,
	loff_t			offset)
{
	return offset >= iomapp->iomap_offset &&
		offset < iomapp->iomap_offset + iomapp->iomap_bsize;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend, 0);
}

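/*
 * Submit one bio on behalf of an ioend.  io_remaining is bumped before
 * submission, so the ioend cannot be completed until xfs_finish_ioend()
 * drops this reference from xfs_end_bio().
 */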
STATIC void
xfs_submit_ioend_bio(
	xfs_ioend_t	*ioend,
	struct bio	*bio)
{
	atomic_inc(&ioend->io_remaining);

	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	submit_bio(WRITE, bio);
	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
	bio_put(bio);
}

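/*
 * Allocate a bio sized for the number of vectors the device can take.
 * The allocation may fail for large vector counts under memory
 * pressure, so retry with progressively fewer vectors until
 * bio_alloc() succeeds.
 */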
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	struct bio		*bio;
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);

	do {
		bio = bio_alloc(GFP_NOIO, nvecs);
		nvecs >>= 1;
	} while (!bio);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio_get(bio);
	return bio;
}

STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}

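/* Add a whole buffer_head's worth of data to a bio. */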
static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O.  If we mark
 * the buffers as we go, we can end up with a page that only has some buffers
 * marked async write, and I/O completion on those can occur before we mark
 * the remaining buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
			xfs_start_buffer_writeback(bh);
		}
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(ioend, bio);
		xfs_finish_ioend(ioend, 0);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this ioend.
 * Toss the ioend too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		vn_iowake(XFS_I(ioend->io_inode));
		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * The ioend to use is returned through *result.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}

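/*
 * Fill in bh->b_blocknr from the iomap: convert the extent's start
 * (iomap_bn, in 512-byte basic blocks) to filesystem blocks and add
 * the block offset of this buffer within the mapping.
 */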
STATIC void
xfs_map_buffer(
	struct buffer_head	*bh,
	xfs_iomap_t		*mp,
	xfs_off_t		offset,
	uint			block_bits)
{
	sector_t		bn;

	ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);

	bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
	      ((offset - mp->iomap_offset) >> block_bits);

	ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
	struct buffer_head	*bh,
	loff_t			offset,
	int			block_bits,
	xfs_iomap_t		*iomapp)
{
	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));

	lock_buffer(bh);
	xfs_map_buffer(bh, iomapp, offset, block_bits);
	bh->b_bdev = iomapp->iomap_target->bt_bdev;
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Look for a page at index that is suitable for clustering.
 */
STATIC unsigned int
xfs_probe_page(
	struct page		*page,
	unsigned int		pg_offset,
	int			mapped)
{
	int			ret = 0;

	if (PageWriteback(page))
		return 0;

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head	*bh, *head;

			bh = head = page_buffers(page);
			do {
				if (!buffer_uptodate(bh))
					break;
				if (mapped != buffer_mapped(bh))
					break;
				ret += bh->b_size;
				if (ret >= pg_offset)
					break;
			} while ((bh = bh->b_this_page) != head);
		} else
			ret = mapped ? 0 : PAGE_CACHE_SIZE;
	}

	return ret;
}

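/*
 * Probe for how much contiguous, writable data follows the given
 * buffer: first sum the remaining buffers on this page, then walk the
 * following pages (bounded to 64) while they remain suitable.
 */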
STATIC size_t
xfs_probe_cluster(
	struct inode		*inode,
	struct page		*startpage,
	struct buffer_head	*bh,
	struct buffer_head	*head,
	int			mapped)
{
	struct pagevec		pvec;
	pgoff_t			tindex, tlast, tloff;
	size_t			total = 0;
	int			done = 0, i;

	/* First sum forwards in this page */
	do {
		if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
			return total;
		total += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	/* if we reached the end of the page, sum forwards in following pages */
	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	tindex = startpage->index + 1;

	/* Prune this back to avoid pathological behavior */
	tloff = min(tlast, startpage->index + 64);

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tloff) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			size_t pg_offset, pg_len = 0;

			if (tindex == tlast) {
				pg_offset =
				    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
				if (!pg_offset) {
					done = 1;
					break;
				}
			} else
				pg_offset = PAGE_CACHE_SIZE;

			if (page->index == tindex && trylock_page(page)) {
				pg_len = xfs_probe_page(page, pg_offset, mapped);
				unlock_page(page);
			}

			if (!pg_len) {
				done = 1;
				break;
			}

			total += pg_len;
			tindex++;
		}

		pagevec_release(&pvec);
		cond_resched();
	}

	return total;
}

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable = (type == IOMAP_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable = (type == IOMAP_DELAY);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable = (type == IOMAP_NEW);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}

/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	xfs_iomap_t		*mp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			bbits = inode->i_blkbits;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count.  On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
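	/*
	 * Illustrative example (assuming 4k pages and 512-byte blocks,
	 * so len = 512): a file ending 768 bytes into the last page
	 * gives p_offset = 768, rounded up to 1024, so page_dirty = 2,
	 * i.e. two buffers lie before EOF.  On any non-final page
	 * p_offset is PAGE_CACHE_SIZE and page_dirty counts all eight
	 * buffers.
	 */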
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh)) {
			if (buffer_unwritten(bh))
				type = IOMAP_UNWRITTEN;
			else
				type = IOMAP_DELAY;

			if (!xfs_iomap_valid(mp, offset)) {
				done = 1;
				continue;
			}

			ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
			ASSERT(!(mp->iomap_flags & IOMAP_DELAY));

			xfs_map_at_offset(bh, offset, bbits, mp);
			if (startio) {
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
			} else {
				set_buffer_dirty(bh);
				unlock_buffer(bh);
				mark_buffer_dirty(bh);
			}
			page_dirty--;
			count++;
		} else {
			type = IOMAP_NEW;
			if (buffer_mapped(bh) && all_bh && startio) {
				lock_buffer(bh);
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
				count++;
				page_dirty--;
			} else {
				done = 1;
			}
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio) {
		if (count) {
			struct backing_dev_info *bdi;

			bdi = inode->i_mapping->backing_dev_info;
			wbc->nr_to_write--;
			if (bdi_write_congested(bdi)) {
				wbc->encountered_congestion = 1;
				done = 1;
			} else if (wbc->nr_to_write <= 0) {
				done = 1;
			}
		}
		xfs_start_page_writeback(page, !page_dirty, count);
	}

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	xfs_iomap_t		*iomapp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					iomapp, ioendp, wbc, startio, all_bh);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state cannot track which blocks, if any, are dirty due to
 * mmap writes, and therefore buffer uptodate state is only valid if the
 * page itself isn't completely uptodate.  Some layers may clear the page
 * dirty flag prior to calling write page, under the assumption the entire
 * page will be written out; by not writing out the whole page the page can
 * be reused before all valid dirty data is written out.  Note: in the case
 * of a page that has been dirtied by mapwrite but only partially set up by
 * block_prepare_write(), the bh->b_state's will not agree and only the
 * ones set up by BPW/BCW will have valid state; thus the whole page must
 * be written out.
 */

STATIC int
xfs_page_state_convert(
	struct inode	*inode,
	struct page	*page,
	struct writeback_control *wbc,
	int		startio,
	int		unmapped) /* also implies page uptodate */
{
	struct buffer_head	*bh, *head;
	xfs_iomap_t		iomap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned long		p_offset = 0;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index, tlast;
	ssize_t			size, len;
	int			flags, err, iomap_valid = 0, uptodate = 1;
	int			page_dirty, count = 0;
	int			trylock = 0;
	int			all_bh = unmapped;

	if (startio) {
		if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
			trylock |= BMAPI_TRYLOCK;
	}

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			if (startio)
				unlock_page(page);
			return 0;
		}
	}

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count.  On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	flags = BMAPI_READ;
	type = IOMAP_NEW;

	/* TODO: cleanup count and page_dirty */

	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
			/*
			 * The iomap is actually still valid, but the
			 * ioend isn't.  This shouldn't happen too often.
			 */
			iomap_valid = 0;
			continue;
		}

		if (iomap_valid)
			iomap_valid = xfs_iomap_valid(&iomap, offset);

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 *
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 *
		 * Third case, an unmapped buffer was found, and we are
		 * in a path where we need to write the whole page out.
		 */
		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
		     !buffer_mapped(bh) && (unmapped || startio))) {
			int new_ioend = 0;

			/*
			 * Make sure we don't use a read-only iomap
			 */
			if (flags == BMAPI_READ)
				iomap_valid = 0;

			if (buffer_unwritten(bh)) {
				type = IOMAP_UNWRITTEN;
				flags = BMAPI_WRITE | BMAPI_IGNSTATE;
			} else if (buffer_delay(bh)) {
				type = IOMAP_DELAY;
				flags = BMAPI_ALLOCATE | trylock;
			} else {
				type = IOMAP_NEW;
				flags = BMAPI_WRITE | BMAPI_MMAP;
			}

			if (!iomap_valid) {
				/*
				 * if we didn't have a valid mapping then we
				 * need to ensure that we put the new mapping
				 * in a new ioend structure.  This needs to be
				 * done to ensure that the ioends correctly
				 * reflect the block mappings at io completion
				 * for unwritten extent conversion.
				 */
				new_ioend = 1;
				if (type == IOMAP_NEW) {
					size = xfs_probe_cluster(inode,
							page, bh, head, 0);
				} else {
					size = len;
				}

				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}
			if (iomap_valid) {
				xfs_map_at_offset(bh, offset,
						inode->i_blkbits, &iomap);
				if (startio) {
					xfs_add_to_ioend(inode, bh, offset,
							type, &ioend,
							new_ioend);
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty--;
				count++;
			}
		} else if (buffer_uptodate(bh) && startio) {
			/*
			 * we got here because the buffer is already mapped.
			 * That means it must already have extents allocated
			 * underneath it.  Map the extent by reading it.
			 */
			if (!iomap_valid || flags != BMAPI_READ) {
				flags = BMAPI_READ;
				size = xfs_probe_cluster(inode, page, bh,
								head, 1);
				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}

			/*
			 * We set the type to IOMAP_NEW in case we are doing a
			 * small write at EOF that is extending the file but
			 * without needing an allocation.  We need to update the
			 * file size on I/O completion in this case so it is
			 * the same case as having just allocated a new extent
			 * that we are writing into for the first time.
			 */
			type = IOMAP_NEW;
			if (trylock_buffer(bh)) {
				ASSERT(buffer_mapped(bh));
				if (iomap_valid)
					all_bh = 1;
				xfs_add_to_ioend(inode, bh, offset, type,
						&ioend, !iomap_valid);
				page_dirty--;
				count++;
			} else {
				iomap_valid = 0;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {
			iomap_valid = 0;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio)
		xfs_start_page_writeback(page, 1, count);

	if (ioend && iomap_valid) {
		offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
					PAGE_CACHE_SHIFT;
		tlast = min_t(pgoff_t, offset, last_index);
		xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
					wbc, startio, all_bh, tlast);
	}

	if (iohead)
		xfs_submit_ioend(iohead);

	return page_dirty;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
	 */
	if (err != -EAGAIN) {
		if (!unmapped)
			block_invalidatepage(page, 0);
		ClearPageUptodate(page);
	}
	return err;
}

/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page.  Typically the page dirty
 *    state is cleared before we get here.  In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it.  For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate.  For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first: if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */

STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	int			error;
	int			need_trans;
	int			delalloc, unmapped, unwritten;
	struct inode		*inode = page->mapping->host;

	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

	/*
	 * We need a transaction if:
	 *  1. There are delalloc buffers on the page
	 *  2. The page is uptodate and we have unmapped buffers
	 *  3. The page is uptodate and we have no buffers
	 *  4. There are unwritten buffers on the page
	 */

	if (!page_has_buffers(page)) {
		unmapped = 1;
		need_trans = 1;
	} else {
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;
		need_trans = delalloc + unmapped + unwritten;
	}

	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (current_test_flags(PF_FSTRANS) && need_trans)
		goto out_fail;

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/*
	 * Convert delayed allocate, unwritten or unmapped space
	 * to real space and flush out to disk.
	 */
	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
	if (error == -EAGAIN)
		goto out_fail;
	if (unlikely(error < 0))
		goto out_unlock;

	return 0;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return error;
}

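/*
 * Clear the truncated flag before deferring to generic_writepages(),
 * which will call back into xfs_vm_writepage() for each dirty page.
 */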
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released.  Possibly the page is already clean.  We always
 * have buffer heads in this call.
 *
 * Returns nonzero if the buffers were freed and the page can be
 * released, 0 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O.  buffer heads will be dirty and possibly
 *    delalloc.  If there are no delalloc buffer heads in this case
 *    then we can just free the buffers.
 *
 * 2. We are called to release a page which has been written via
 *    mmap; all we need to do is ensure there is no delalloc
 *    state in the buffer heads.  If there is none we can let the
 *    caller free them, and we should come back later via writepage.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	struct inode		*inode = page->mapping->host;
	int			dirty, delalloc, unmapped, unwritten;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);

	if (!page_has_buffers(page))
		return 0;

	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
	if (!delalloc && !unwritten)
		goto free_buffers;

	if (!(gfp_mask & __GFP_FS))
		return 0;

	/* If we are already inside a transaction or the thread cannot
	 * do I/O, we cannot release this page.
	 */
	if (current_test_flags(PF_FSTRANS))
		return 0;

	/*
	 * Convert delalloc space to real space, do not flush the
	 * data out to disk, that will be done by the caller.
	 * Never need to allocate space here - we will always
	 * come back to writepage in that case.
	 */
	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
	if (dirty == 0 && !unwritten)
		goto free_buffers;
	return 0;

free_buffers:
	return try_to_free_buffers(page);
}

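/*
 * Map the block at iblock into bh_result for either a buffered or a
 * direct I/O request, depending on the 'direct' flag; this is the
 * underlying implementation for the get_block callbacks.
 */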
STATIC int
__xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create,
	int			direct,
	bmapi_flags_t		flags)
{
	xfs_iomap_t		iomap;
	xfs_off_t		offset;
	ssize_t			size;
	int			niomap = 1;
	int			error;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

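	/*
	 * A direct read that starts at or beyond EOF cannot return any
	 * data; leave the buffer unmapped so it reads as a hole.
	 */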
	if (!create && direct && offset >= i_size_read(inode))
		return 0;

	error = xfs_iomap(XFS_I(inode), offset, size,
			  create ? flags : BMAPI_READ, &iomap, &niomap);
	if (error)
		return -error;
	if (niomap == 0)
		return 0;

	if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
		/*
		 * For unwritten extents do not report a disk address in
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			xfs_map_buffer(bh_result, &iomap, offset,
				       inode->i_blkbits);
		}
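		/*
		 * For a direct write into an unwritten extent, stash the
		 * inode in b_private: non-NULL private data tells the I/O
		 * completion handler (xfs_end_io_direct) that an unwritten
		 * to written extent conversion is still needed.
		 */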
		if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to the one pointed to by the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = iomap.iomap_target->bt_bdev;

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
		set_buffer_new(bh_result);

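	/*
	 * Delayed-allocation blocks have no disk address yet.  Mark the
	 * buffer delayed so writeback knows real blocks must still be
	 * allocated; direct I/O must never see a delalloc mapping here.
	 */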
	if (iomap.iomap_flags & IOMAP_DELAY) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

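	/*
	 * Callers mapping more than one block at a time pass in a large
	 * b_size; trim it here to the part of the request this mapping
	 * actually covers.
	 */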
	if (direct || size > (1 << inode->i_blkbits)) {
		ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
		offset = min_t(xfs_off_t,
				iomap.iomap_bsize - iomap.iomap_delta, size);
		bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
	}

	return 0;
}

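/*
 * Block mapping callback for the buffered I/O path (readpage(s),
 * write_begin and bmap).
 */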
int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 0, BMAPI_WRITE);
}

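/*
 * Block mapping callback for direct I/O; the additional BMAPI_DIRECT
 * flag is passed on to xfs_iomap() to distinguish the two cases.
 */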
STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}

STATIC void
xfs_end_io_direct(
	struct kiocb	*iocb,
	loff_t		offset,
	ssize_t		size,
	void		*private)
{
	xfs_ioend_t	*ioend = iocb->private;

	/*
	 * Non-NULL private data means we need to issue a transaction to
	 * convert a range from unwritten to written extents.  This needs
	 * to happen from process context but aio+dio I/O completion
	 * happens from irq context so we need to defer it to a workqueue.
	 * This is not necessary for synchronous direct I/O, but we do
	 * it anyway to keep the code uniform and simpler.
	 *
	 * Well, if only it were that simple. Because synchronous direct I/O
	 * requires extent conversion to occur *before* we return to userspace,
	 * we have to wait for extent conversion to complete. Look at the
	 * iocb that has been passed to us to determine if this is AIO or
	 * not. If it is synchronous, tell xfs_finish_ioend() to kick the
	 * workqueue and wait for it to complete.
	 *
	 * The core direct I/O code might be changed to always call the
	 * completion handler in the future, in which case all this can
	 * go away.
	 */
	ioend->io_offset = offset;
	ioend->io_size = size;
	if (ioend->io_type == IOMAP_READ) {
		xfs_finish_ioend(ioend, 0);
	} else if (private && size > 0) {
		xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
	} else {
		/*
		 * A direct I/O write ioend starts its life in unwritten
		 * state in case it maps an unwritten extent.  This write
		 * didn't map an unwritten extent so switch its completion
		 * handler.
		 */
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
		xfs_finish_ioend(ioend, 0);
	}

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;
}

STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct file	*file = iocb->ki_filp;
	struct inode	*inode = file->f_mapping->host;
	struct block_device *bdev;
	ssize_t		ret;

	bdev = xfs_find_bdev_for_inode(XFS_I(inode));

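	/*
	 * Allocate the ioend up front.  A write ioend starts life in the
	 * IOMAP_UNWRITTEN state in case it maps an unwritten extent (see
	 * xfs_end_io_direct()); a read never needs extent conversion.
	 */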
	if (rw == WRITE) {
		iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
		ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
			bdev, iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	} else {
		iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
		ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
			bdev, iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	}

	if (unlikely(ret != -EIOCBQUEUED && iocb->private))
		xfs_destroy_ioend(iocb->private);
	return ret;
}

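/*
 * .write_begin is delegated to the generic helper; *pagep is cleared
 * first so that block_write_begin() finds and locks the page itself.
 */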
STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	*pagep = NULL;
	return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
								xfs_get_blocks);
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

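	/*
	 * Flush dirty pages first so that delalloc blocks are converted
	 * to real extents and the block number reported below refers to
	 * an on-disk location.
	 */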
	xfs_itrace_entry(ip);
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned long		offset)
{
	xfs_page_trace(XFS_INVALIDPAGE_ENTER,
			page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.sync_page		= block_sync_page,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= generic_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
};