/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

STATIC void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unmapped,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unmapped = *unwritten = 0;

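	/*
	 * Buffer heads on a page form a circular list linked through
	 * b_this_page; walk it exactly once starting from the head.
	 */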
	bh = head = page_buffers(page);
	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))
			(*unmapped) = 1;
		else if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
	int		tag,
	struct inode	*inode,
	struct page	*page,
	unsigned long	pgoff)
{
	xfs_inode_t	*ip;
	bhv_vnode_t	*vp = vn_from_inode(inode);
	loff_t		isize = i_size_read(inode);
	loff_t		offset = page_offset(page);
	int		delalloc = -1, unmapped = -1, unwritten = -1;

	if (page_has_buffers(page))
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	ip = xfs_vtoi(vp);
	if (!ip->i_rwtrace)
		return;

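	/*
	 * ktrace slots are pointer sized, so the 64-bit sizes and
	 * offsets below are recorded as two 32-bit halves each.
	 */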
	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)inode,
		(void *)page,
		(void *)pgoff,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
		(void *)((unsigned long)(isize & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)delalloc),
		(void *)((unsigned long)unmapped),
		(void *)((unsigned long)unwritten),
		(void *)((unsigned long)current_pid()),
		(void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, pgoff)
#endif

STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME)
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

/*
 * Schedule IO completion handling on an xfsdatad if this was
 * the final hold on this ioend.  If we are asked to wait,
 * flush the workqueue.
 */
STATIC void
xfs_finish_ioend(
	xfs_ioend_t	*ioend,
	int		wait)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		queue_work(xfsdatad_workqueue, &ioend->io_work);
		if (wait)
			flush_workqueue(xfsdatad_workqueue);
	}
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t	*ioend)
{
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}
	if (unlikely(ioend->io_error)) {
		vn_ioerror(XFS_I(ioend->io_inode), ioend->io_error,
				__FILE__, __LINE__);
	}
	vn_iowake(XFS_I(ioend->io_inode));
	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * Update on-disk file size now that data has been written to disk.
 * The current in-memory file size is i_size.  If a write is beyond
 * eof, io_new_size will be the intended file size until i_size is
 * updated.  If this write does not extend all the way to the valid
 * file size then restrict this update to the end of the write.
 */
STATIC void
xfs_setfilesize(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;
	xfs_fsize_t		bsize;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(ioend->io_type != IOMAP_READ);

	if (unlikely(ioend->io_error))
		return;

	bsize = ioend->io_offset + ioend->io_size;

	xfs_ilock(ip, XFS_ILOCK_EXCL);

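	/*
	 * Take the larger of the in-core size and any in-flight
	 * extending write (io_new_size), then clamp to the end of
	 * this I/O so we never publish an on-disk size beyond what
	 * has actually been written.
	 */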
	isize = MAX(ip->i_size, ip->i_iocore.io_new_size);
	isize = MIN(isize, bsize);

	if (ip->i_d.di_size < isize) {
		ip->i_d.di_size = isize;
		ip->i_update_core = 1;
		ip->i_update_size = 1;
		mark_inode_dirty_sync(ioend->io_inode);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

/*
 * Buffered IO write completion for delayed allocate extents.
 */
STATIC void
xfs_end_bio_delalloc(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * Buffered IO write completion for regular, written extents.
 */
STATIC void
xfs_end_bio_written(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * IO write completion for unwritten extents.
 *
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
STATIC void
xfs_end_bio_unwritten(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;

	if (likely(!ioend->io_error)) {
		xfs_bmap(XFS_I(ioend->io_inode), offset, size,
				BMAPI_UNWRITTEN, NULL, NULL);
		xfs_setfilesize(ioend);
	}
	xfs_destroy_ioend(ioend);
}

/*
 * IO read completion for regular, written extents.
 */
STATIC void
xfs_end_bio_read(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_destroy_ioend(ioend);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially.  This keeps the I/O
	 * completion callback from running before we have started
	 * all the I/O; otherwise the completion routine could be
	 * called too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
	ioend->io_offset = 0;
	ioend->io_size = 0;

	if (type == IOMAP_UNWRITTEN)
		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
	else if (type == IOMAP_DELAY)
		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
	else if (type == IOMAP_READ)
		INIT_WORK(&ioend->io_work, xfs_end_bio_read);
	else
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);

	return ioend;
}

STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	ssize_t			count,
	xfs_iomap_t		*mapp,
	int			flags)
{
	xfs_inode_t		*ip = XFS_I(inode);
	int			error, nmaps = 1;

	error = xfs_bmap(ip, offset, count,
			flags, mapp, &nmaps);
	if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
		xfs_iflags_set(ip, XFS_IMODIFIED);
	return -error;
}

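/*
 * Check whether the given file offset is still covered by the
 * cached mapping.
 */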
STATIC_INLINE int
xfs_iomap_valid(
	xfs_iomap_t		*iomapp,
	loff_t			offset)
{
	return offset >= iomapp->iomap_offset &&
		offset < iomapp->iomap_offset + iomapp->iomap_bsize;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend, 0);
}

STATIC void
xfs_submit_ioend_bio(
	xfs_ioend_t	*ioend,
	struct bio	*bio)
{
	atomic_inc(&ioend->io_remaining);

	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

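	/*
	 * xfs_alloc_ioend_bio() took an extra reference on the bio
	 * (bio_get), so it cannot be freed beneath us before the
	 * BIO_EOPNOTSUPP check below; drop that reference here.
	 */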
	submit_bio(WRITE, bio);
	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
	bio_put(bio);
}

STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	struct bio		*bio;
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);

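	/*
	 * A GFP_NOIO allocation of a large bio can fail under memory
	 * pressure; halve the requested vector count and retry until
	 * an allocation succeeds.
	 */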
	do {
		bio = bio_alloc(GFP_NOIO, nvecs);
		nvecs >>= 1;
	} while (!bio);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio_get(bio);
	return bio;
}

STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page		*page,
	struct writeback_control *wbc,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}

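/*
 * Add a buffer_head's data to a bio.  Returns the number of bytes
 * actually added; callers treat anything other than bh->b_size as
 * "bio full, submit it and retry with a fresh one".
 */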
static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O.  If we mark the
 * buffers as we process them, I/O completion for the first buffers can occur
 * while the remaining buffers on the page are not yet marked async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
			xfs_start_buffer_writeback(bh);
		}
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(ioend, bio);
		xfs_finish_ioend(ioend, 0);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		vn_iowake(XFS_I(ioend->io_inode));
		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * The most recently built ioend is passed back through *result.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}

STATIC void
xfs_map_buffer(
	struct buffer_head	*bh,
	xfs_iomap_t		*mp,
	xfs_off_t		offset,
	uint			block_bits)
{
	sector_t		bn;

	ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);

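	/*
	 * iomap_bn is in 512-byte basic blocks (BBSHIFT); convert it
	 * to filesystem-block units, then add the block offset of
	 * this buffer within the mapping.
	 */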
	bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
	      ((offset - mp->iomap_offset) >> block_bits);

	ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
	struct buffer_head	*bh,
	loff_t			offset,
	int			block_bits,
	xfs_iomap_t		*iomapp)
{
	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));

	lock_buffer(bh);
	xfs_map_buffer(bh, iomapp, offset, block_bits);
	bh->b_bdev = iomapp->iomap_target->bt_bdev;
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Look for a page at index that is suitable for clustering.
 */
STATIC unsigned int
xfs_probe_page(
	struct page		*page,
	unsigned int		pg_offset,
	int			mapped)
{
	int			ret = 0;

	if (PageWriteback(page))
		return 0;

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head	*bh, *head;

			bh = head = page_buffers(page);
			do {
				if (!buffer_uptodate(bh))
					break;
				if (mapped != buffer_mapped(bh))
					break;
				ret += bh->b_size;
				if (ret >= pg_offset)
					break;
			} while ((bh = bh->b_this_page) != head);
		} else
			ret = mapped ? 0 : PAGE_CACHE_SIZE;
	}

	return ret;
}

STATIC size_t
xfs_probe_cluster(
	struct inode		*inode,
	struct page		*startpage,
	struct buffer_head	*bh,
	struct buffer_head	*head,
	int			mapped)
{
	struct pagevec		pvec;
	pgoff_t			tindex, tlast, tloff;
	size_t			total = 0;
	int			done = 0, i;

	/* First sum forwards in this page */
	do {
		if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
			return total;
		total += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	/* if we reached the end of the page, sum forwards in following pages */
	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	tindex = startpage->index + 1;

	/* Prune this back to avoid pathological behavior */
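	/*
	 * (Clustering is bounded to 64 pages past the start page;
	 * pages outside the window stay dirty and get written by a
	 * later writeback pass.)
	 */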
	tloff = min(tlast, startpage->index + 64);

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tloff) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			size_t pg_offset, pg_len = 0;

			if (tindex == tlast) {
				pg_offset =
				    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
				if (!pg_offset) {
					done = 1;
					break;
				}
			} else
				pg_offset = PAGE_CACHE_SIZE;

			if (page->index == tindex && !TestSetPageLocked(page)) {
				pg_len = xfs_probe_page(page, pg_offset, mapped);
				unlock_page(page);
			}

			if (!pg_len) {
				done = 1;
				break;
			}

			total += pg_len;
			tindex++;
		}

		pagevec_release(&pvec);
		cond_resched();
	}

	return total;
}

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable = (type == IOMAP_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable = (type == IOMAP_DELAY);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable = (type == IOMAP_NEW);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}

/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	xfs_iomap_t		*mp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			bbits = inode->i_blkbits;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (TestSetPageLocked(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh)) {
			if (buffer_unwritten(bh))
				type = IOMAP_UNWRITTEN;
			else
				type = IOMAP_DELAY;

			if (!xfs_iomap_valid(mp, offset)) {
				done = 1;
				continue;
			}

			ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
			ASSERT(!(mp->iomap_flags & IOMAP_DELAY));

			xfs_map_at_offset(bh, offset, bbits, mp);
			if (startio) {
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
			} else {
				set_buffer_dirty(bh);
				unlock_buffer(bh);
				mark_buffer_dirty(bh);
			}
			page_dirty--;
			count++;
		} else {
			type = IOMAP_NEW;
			if (buffer_mapped(bh) && all_bh && startio) {
				lock_buffer(bh);
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
				count++;
				page_dirty--;
			} else {
				done = 1;
			}
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio) {
		if (count) {
			struct backing_dev_info *bdi;

			bdi = inode->i_mapping->backing_dev_info;
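			/*
			 * Charge this page against the writeback quota and
			 * stop clustering early if the backing device is
			 * congested or the quota is exhausted.
			 */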
			wbc->nr_to_write--;
			if (bdi_write_congested(bdi)) {
				wbc->encountered_congestion = 1;
				done = 1;
			} else if (wbc->nr_to_write <= 0) {
				done = 1;
			}
		}
		xfs_start_page_writeback(page, wbc, !page_dirty, count);
	}

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	xfs_iomap_t		*iomapp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					iomapp, ioendp, wbc, startio, all_bh);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know whether any of the blocks, or which
 * blocks for that matter, are dirty due to mmap writes, and therefore
 * bh uptodate is only valid if the page itself isn't completely uptodate.
 * Some layers may clear the page dirty flag prior to calling writepage,
 * under the assumption the entire page will be written out; by not
 * writing out the whole page the page can be reused before all valid
 * dirty data is written out.  Note: in the case of a page that has been
 * dirtied by mmap writes but only partially set up by
 * block_prepare_write, the bh states will not agree and only the ones
 * set up by BPW/BCW will have valid state; hence the whole page must be
 * written out.
 */

STATIC int
xfs_page_state_convert(
	struct inode	*inode,
	struct page	*page,
	struct writeback_control *wbc,
	int		startio,
	int		unmapped) /* also implies page uptodate */
{
	struct buffer_head	*bh, *head;
	xfs_iomap_t		iomap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned long		p_offset = 0;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index, tlast;
	ssize_t			size, len;
	int			flags, err, iomap_valid = 0, uptodate = 1;
	int			page_dirty, count = 0;
	int			trylock = 0;
	int			all_bh = unmapped;

	if (startio) {
		if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
			trylock |= BMAPI_TRYLOCK;
	}

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			if (startio)
				unlock_page(page);
			return 0;
		}
	}

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	flags = BMAPI_READ;
	type = IOMAP_NEW;

	/* TODO: cleanup count and page_dirty */

	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
			/*
			 * the iomap is actually still valid, but the ioend
			 * isn't.  shouldn't happen too often.
			 */
			iomap_valid = 0;
			continue;
		}

		if (iomap_valid)
			iomap_valid = xfs_iomap_valid(&iomap, offset);

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 *
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 *
		 * Third case, an unmapped buffer was found, and we are
		 * in a path where we need to write the whole page out.
		 */
		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
		     !buffer_mapped(bh) && (unmapped || startio))) {
			int new_ioend = 0;

			/*
			 * Make sure we don't use a read-only iomap
			 */
			if (flags == BMAPI_READ)
				iomap_valid = 0;

			if (buffer_unwritten(bh)) {
				type = IOMAP_UNWRITTEN;
				flags = BMAPI_WRITE | BMAPI_IGNSTATE;
			} else if (buffer_delay(bh)) {
				type = IOMAP_DELAY;
				flags = BMAPI_ALLOCATE | trylock;
			} else {
				type = IOMAP_NEW;
				flags = BMAPI_WRITE | BMAPI_MMAP;
			}

			if (!iomap_valid) {
				/*
				 * if we didn't have a valid mapping then we
				 * need to ensure that we put the new mapping
				 * in a new ioend structure. This needs to be
				 * done to ensure that the ioends correctly
				 * reflect the block mappings at io completion
				 * for unwritten extent conversion.
				 */
				new_ioend = 1;
				if (type == IOMAP_NEW) {
					size = xfs_probe_cluster(inode,
							page, bh, head, 0);
				} else {
					size = len;
				}

				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}
			if (iomap_valid) {
				xfs_map_at_offset(bh, offset,
						inode->i_blkbits, &iomap);
				if (startio) {
					xfs_add_to_ioend(inode, bh, offset,
							type, &ioend,
							new_ioend);
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty--;
				count++;
			}
		} else if (buffer_uptodate(bh) && startio) {
			/*
			 * we got here because the buffer is already mapped.
			 * That means it must already have extents allocated
			 * underneath it. Map the extent by reading it.
			 */
			if (!iomap_valid || flags != BMAPI_READ) {
				flags = BMAPI_READ;
				size = xfs_probe_cluster(inode, page, bh,
								head, 1);
				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}

			/*
			 * We set the type to IOMAP_NEW in case we are doing a
			 * small write at EOF that is extending the file but
			 * without needing an allocation. We need to update the
			 * file size on I/O completion in this case so it is
			 * the same case as having just allocated a new extent
			 * that we are writing into for the first time.
			 */
			type = IOMAP_NEW;
			if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
				ASSERT(buffer_mapped(bh));
				if (iomap_valid)
					all_bh = 1;
				xfs_add_to_ioend(inode, bh, offset, type,
						&ioend, !iomap_valid);
				page_dirty--;
				count++;
			} else {
				iomap_valid = 0;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {
			iomap_valid = 0;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio)
		xfs_start_page_writeback(page, wbc, 1, count);

	if (ioend && iomap_valid) {
		offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
					PAGE_CACHE_SHIFT;
		tlast = min_t(pgoff_t, offset, last_index);
		xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
					wbc, startio, all_bh, tlast);
	}

	if (iohead)
		xfs_submit_ioend(iohead);

	return page_dirty;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
	 */
	if (err != -EAGAIN) {
		if (!unmapped)
			block_invalidatepage(page, 0);
		ClearPageUptodate(page);
	}
	return err;
}

/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it. For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate. For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first: if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */

STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	int			error;
	int			need_trans;
	int			delalloc, unmapped, unwritten;
	struct inode		*inode = page->mapping->host;

	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

	/*
	 * We need a transaction if:
	 *  1. There are delalloc buffers on the page
	 *  2. The page is uptodate and we have unmapped buffers
	 *  3. The page is uptodate and we have no buffers
	 *  4. There are unwritten buffers on the page
	 */

	if (!page_has_buffers(page)) {
		unmapped = 1;
		need_trans = 1;
	} else {
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;
		need_trans = delalloc + unmapped + unwritten;
	}

	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (current_test_flags(PF_FSTRANS) && need_trans)
		goto out_fail;

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/*
	 * Convert delayed allocate, unwritten or unmapped space
	 * to real space and flush out to disk.
	 */
	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
	if (error == -EAGAIN)
		goto out_fail;
	if (unlikely(error < 0))
		goto out_unlock;

	return 0;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return error;
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. Possibly the page is already clean. We always
 * have buffer heads in this call.
 *
 * Returns non-zero if the page's buffers were freed and it is ok
 * to release, 0 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O. buffer heads will be dirty and possibly
 *    delalloc. If no delalloc buffer heads in this case then we
 *    can just return zero.
 *
 * 2. We are called to release a page which has been written via
 *    mmap, all we need to do is ensure there is no delalloc
 *    state in the buffer heads, if not we can let the caller
 *    free them and we should come back later via writepage.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	struct inode		*inode = page->mapping->host;
	int			dirty, delalloc, unmapped, unwritten;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);

	if (!page_has_buffers(page))
		return 0;

	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
	if (!delalloc && !unwritten)
		goto free_buffers;

	if (!(gfp_mask & __GFP_FS))
		return 0;

	/* If we are already inside a transaction or the thread cannot
	 * do I/O, we cannot release this page.
	 */
	if (current_test_flags(PF_FSTRANS))
		return 0;

	/*
	 * Convert delalloc space to real space, do not flush the
	 * data out to disk, that will be done by the caller.
	 * Never need to allocate space here - we will always
	 * come back to writepage in that case.
	 */
	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
	if (dirty == 0 && !unwritten)
		goto free_buffers;
	return 0;

free_buffers:
	return try_to_free_buffers(page);
}

Linus Torvalds1da177e2005-04-16 15:20:36 -07001320STATIC int
Nathan Scottc2536662006-03-29 10:44:40 +10001321__xfs_get_blocks(
Linus Torvalds1da177e2005-04-16 15:20:36 -07001322 struct inode *inode,
1323 sector_t iblock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324 struct buffer_head *bh_result,
1325 int create,
1326 int direct,
1327 bmapi_flags_t flags)
1328{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329 xfs_iomap_t iomap;
Nathan Scottfdc7ed72005-11-02 15:13:13 +11001330 xfs_off_t offset;
1331 ssize_t size;
Nathan Scottc2536662006-03-29 10:44:40 +10001332 int niomap = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001333 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334
	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;
	error = xfs_bmap(XFS_I(inode), offset, size,
			 create ? flags : BMAPI_READ, &iomap, &niomap);
	if (error)
		return -error;
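	/* No mapping was returned - leave the buffer head unmapped. */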
	if (niomap == 0)
		return 0;

	if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
		/*
		 * For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			xfs_map_buffer(bh_result, &iomap, offset,
				       inode->i_blkbits);
		}
		if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = iomap.iomap_target->bt_bdev;

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
		set_buffer_new(bh_result);

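	/*
	 * Delayed allocation extents have space reserved but no disk
	 * blocks assigned yet, so mark the buffer delayed for writeback
	 * to allocate later.  The direct I/O path never sees delalloc.
	 */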
	if (iomap.iomap_flags & IOMAP_DELAY) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

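	/*
	 * For direct I/O or multi-block requests, report how much of
	 * the request this mapping actually covers by trimming b_size
	 * to the size of the mapped range.
	 */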
	if (direct || size > (1 << inode->i_blkbits)) {
		ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
		offset = min_t(xfs_off_t,
				iomap.iomap_bsize - iomap.iomap_delta, size);
		bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
	}

	return 0;
}

int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 0, BMAPI_WRITE);
}

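/*
 * Identical to xfs_get_blocks() except that the mapping request is
 * flagged as direct I/O (BMAPI_DIRECT), so unwritten extents are
 * converted at I/O completion rather than at submission.
 */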
STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}

STATIC void
xfs_end_io_direct(
	struct kiocb	*iocb,
	loff_t		offset,
	ssize_t		size,
	void		*private)
{
	xfs_ioend_t	*ioend = iocb->private;

	/*
	 * Non-NULL private data means we need to issue a transaction to
	 * convert a range from unwritten to written extents.  This needs
	 * to happen from process context but aio+dio I/O completion
	 * happens from irq context so we need to defer it to a workqueue.
	 * This is not necessary for synchronous direct I/O, but we do
	 * it anyway to keep the code uniform and simpler.
	 *
	 * Well, if only it were that simple.  Because synchronous direct I/O
	 * requires extent conversion to occur *before* we return to userspace,
	 * we have to wait for extent conversion to complete.  Look at the
	 * iocb that has been passed to us to determine if this is AIO or
	 * not.  If it is synchronous, tell xfs_finish_ioend() to kick the
	 * workqueue and wait for it to complete.
	 *
	 * The core direct I/O code might be changed to always call the
	 * completion handler in the future, in which case all this can
	 * go away.
	 */
	ioend->io_offset = offset;
	ioend->io_size = size;
	if (ioend->io_type == IOMAP_READ) {
		xfs_finish_ioend(ioend, 0);
	} else if (private && size > 0) {
		xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
	} else {
		/*
		 * A direct I/O write ioend starts its life in unwritten
		 * state in case it maps an unwritten extent.  This write
		 * didn't map an unwritten extent, so switch its completion
		 * handler.
		 */
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
		xfs_finish_ioend(ioend, 0);
	}

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;
}

STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct file	*file = iocb->ki_filp;
	struct inode	*inode = file->f_mapping->host;
	struct block_device *bdev;
	ssize_t		ret;

	bdev = xfs_find_bdev_for_inode(XFS_I(inode));

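	/*
	 * A write ioend starts out in unwritten state in case it maps
	 * an unwritten extent.  Writes also use the "own locking"
	 * variant because XFS does its own inode locking; reads can
	 * use the lockless variant.
	 */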
	if (rw == WRITE) {
		iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
		ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
			bdev, iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	} else {
		iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
		ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
			bdev, iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	}

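	/*
	 * If the request was neither queued as AIO nor completed via
	 * the end_io handler, the ioend is still ours to free.
	 */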
	if (unlikely(ret != -EIOCBQUEUED && iocb->private))
		xfs_destroy_ioend(iocb->private);
	return ret;
}

STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	*pagep = NULL;
	return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
								xfs_get_blocks);
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	xfs_itrace_entry(ip);
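	/*
	 * Flush dirty pages first so that any delayed allocations are
	 * converted and the bmap result reports real disk blocks.
	 */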
	xfs_rwlock(ip, VRWLOCK_READ);
	xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
	xfs_rwunlock(ip, VRWLOCK_READ);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned long		offset)
{
	xfs_page_trace(XFS_INVALIDPAGE_ENTER,
			page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.sync_page		= block_sync_page,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= generic_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
};