/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_log.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include <linux/aio.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

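/*
 * Walk the buffer_heads attached to a page and report whether any of them
 * are in delayed allocation or unwritten state.
 */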
void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

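/*
 * Return the block device backing this inode: the realtime device for
 * realtime inodes, the data device for everything else.
 */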
STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	if (ioend->io_iocb) {
		inode_dio_done(ioend->io_inode);
		if (ioend->io_isasync) {
			aio_complete(ioend->io_iocb, ioend->io_error ?
					ioend->io_error : ioend->io_result, 0);
		}
	}

	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

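/*
 * Pre-allocate and reserve a file size update transaction in the submission
 * path, so that the on-disk inode size update at I/O completion does not
 * have to reserve log space itself.
 */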
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);

	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	rwsem_release(&ioend->io_inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
		      1, _THIS_IP_);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
xfs_setfilesize(
	struct xfs_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;
	xfs_fsize_t		isize;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
	rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
			   0, 1, _THIS_IP_);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp, 0);
		return 0;
	}

	trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp, 0);
}

/*
 * Schedule IO completion handling on the final put of an ioend.
 *
 * If there is no work to do we might as well call it a day and free the
 * ioend right now.
 */
STATIC void
xfs_finish_ioend(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

		if (ioend->io_type == XFS_IO_UNWRITTEN)
			queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
		else if (ioend->io_append_trans ||
			 (ioend->io_isdirect && xfs_ioend_is_append(ioend)))
			queue_work(mp->m_data_workqueue, &ioend->io_work);
		else
			xfs_destroy_ioend(ioend);
	}
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t	*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode *ip = XFS_I(ioend->io_inode);
	int		error = 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ioend->io_error = -EIO;
		goto done;
	}
	if (ioend->io_error)
		goto done;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */
	if (ioend->io_type == XFS_IO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
	} else if (ioend->io_isdirect && xfs_ioend_is_append(ioend)) {
		/*
		 * For direct I/O we do not know if we need to allocate blocks
		 * or not so we can't preallocate an append transaction as that
		 * results in nested reservations and log space deadlocks. Hence
		 * allocate the transaction here. While this is sub-optimal and
		 * can block IO completion for some time, we're stuck with doing
		 * it this way until we can pass the ioend to the direct IO
		 * allocation callbacks and avoid nesting that way.
		 */
		error = xfs_setfilesize_trans_alloc(ioend);
		if (error)
			goto done;
		error = xfs_setfilesize(ioend);
	} else if (ioend->io_append_trans) {
		error = xfs_setfilesize(ioend);
	} else {
		ASSERT(!xfs_ioend_is_append(ioend));
	}

done:
	if (error)
		ioend->io_error = -error;
	xfs_destroy_ioend(ioend);
}

/*
 * Call IO completion handling in caller context on the final put of an ioend.
 */
STATIC void
xfs_finish_ioend_sync(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining))
		xfs_end_io(&ioend->io_work);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent the I/O
	 * completion callback from running before all the I/O has been
	 * submitted, i.e. from calling the completion routine too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_isasync = 0;
	ioend->io_isdirect = 0;
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	ioend->io_offset = 0;
	ioend->io_size = 0;
	ioend->io_iocb = NULL;
	ioend->io_result = 0;
	ioend->io_append_trans = NULL;

	INIT_WORK(&ioend->io_work, xfs_end_io);
	return ioend;
}

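/*
 * Map a file offset to a disk extent.  Takes the inode ilock shared
 * (trying without blocking first for nonblocking callers), reads the
 * block mapping, and for delalloc regions calls out to allocate real
 * blocks before returning.
 */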
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type,
	int			nonblocking)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = 1 << inode->i_blkbits;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	if (type == XFS_IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
		if (nonblocking)
			return -XFS_ERROR(EAGAIN);
		xfs_ilock(ip, XFS_ILOCK_SHARED);
	}

	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if (offset + count > mp->m_super->s_maxbytes)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return -XFS_ERROR(error);

	if (type == XFS_IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, offset, count, imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return -XFS_ERROR(error);
	}

#ifdef DEBUG
	if (type == XFS_IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}

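/*
 * Check whether the given file offset still falls inside the cached
 * extent mapping.
 */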
STATIC int
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend);
}

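/*
 * Take an extra reference on the ioend for the bio in flight, wire up the
 * completion handler and submit the bio, using WRITE_SYNC for data
 * integrity (WB_SYNC_ALL) writeback.
 */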
STATIC void
xfs_submit_ioend_bio(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;
	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
}

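/*
 * Allocate a bio sized to the underlying device's vector limit and point
 * it at the buffer's starting sector on that device.
 */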
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio		*bio = bio_alloc(GFP_NOIO, nvecs);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	return bio;
}

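/*
 * Mark a single buffer ready for writeback: async write, uptodate and no
 * longer dirty.  The buffer must already be mapped and locked.
 */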
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

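/*
 * Transition a page into writeback state and unlock it.  If none of its
 * buffers will be written, writeback on the page is completed here too.
 */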
STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}

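/*
 * Add one buffer_head's page segment to a bio; returns the number of
 * bytes actually added, which is less than bh->b_size when the bio is full.
 */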
static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O. If we mark the
 * buffers as we go, then we can end up with a page that only has buffers
 * marked async write, and I/O completion can occur before we mark the other
 * buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 *
 * If @fail is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the ioend chain rather
 * than submit it to IO. This typically only happens on a filesystem shutdown.
 */
STATIC void
xfs_submit_ioend(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	int			fail)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
			xfs_start_buffer_writeback(bh);
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		/*
		 * If we are failing the IO now, just mark the ioend with an
		 * error and finish it. This will run IO completion immediately
		 * as there is only one reference to the ioend at this point in
		 * time.
		 */
		if (fail) {
			ioend->io_error = -fail;
			xfs_finish_ioend(ioend);
			continue;
		}

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this ioend.
 * Toss the ioend too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * New ioends are chained onto the previous one via io_list.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}

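/*
 * Fill in bh->b_blocknr from the extent mapping: convert the extent's
 * start from 512-byte basic blocks into units of the inode block size,
 * then add the offset of this buffer within the extent.
 */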
STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

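/*
 * Map a buffer to its on-disk location and clear the delayed/unwritten
 * buffer state now that real blocks back it.
 */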
STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_check_page_type(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable += (type == XFS_IO_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable += (type == XFS_IO_DELALLOC);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable += (type == XFS_IO_OVERWRITE);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}

/*
 * Allocate & map buffers for page given the extent map. Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_check_page_type(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	/*
	 * If the current map does not span the entire page we are about to try
	 * to write, then give up. The only way we can write a page that spans
	 * multiple mappings in a single writeback iteration is via the
	 * xfs_vm_writepage() function. Data integrity writeback requires the
	 * entire page to be written in a single attempt, otherwise the part of
	 * the page we don't write here doesn't get written as part of the data
	 * integrity sync.
	 *
	 * For normal writeback, we also don't attempt to write partial pages
	 * here as it simply means that write_cache_pages() will see it under
	 * writeback and ignore the page until some point in the future, at
	 * which time this will be the only page in the file that needs
	 * writeback.  Hence for more optimal IO patterns, we should always
	 * avoid partial page writeback due to multiple mappings on a page here.
	 */
	if (!xfs_imap_valid(inode, imap, end_offset))
		goto fail_unlock_page;

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    buffer_mapped(bh)) {
			if (buffer_unwritten(bh))
				type = XFS_IO_UNWRITTEN;
			else if (buffer_delay(bh))
				type = XFS_IO_DELALLOC;
			else
				type = XFS_IO_OVERWRITE;

			if (!xfs_imap_valid(inode, imap, offset)) {
				done = 1;
				continue;
			}

			lock_buffer(bh);
			if (type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type,
					 ioendp, done);

			page_dirty--;
			count++;
		} else {
			done = 1;
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (count) {
		if (--wbc->nr_to_write <= 0 &&
		    wbc->sync_mode == WB_SYNC_NONE)
			done = 1;
	}
	xfs_start_page_writeback(page, !page_dirty, count);

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by imap and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
						imap, ioendp, wbc);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}

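/*
 * Page invalidation hook: trace the call, then let the generic buffer
 * code drop any buffers over the invalidated range.
 */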
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset,
				 length);
	block_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, XFS_IO_DELALLOC))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += 1 << inode->i_blkbits;

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE);
	return;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct inode		*inode = page->mapping->host;
	struct buffer_head	*bh, *head;
	struct xfs_bmbt_irec	imap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index;
	ssize_t			len;
	int			err, imap_valid = 0, uptodate = 1;
	int			count = 0;
	int			nonblocking = 0;

	trace_xfs_writepage(inode, page, 0, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON(current->flags & PF_FSTRANS))
		goto redirty;

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 */
		if (page->index >= end_index + 1 || offset_into_page == 0)
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);
	}

	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			offset);
	len = 1 << inode->i_blkbits;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	type = XFS_IO_OVERWRITE;

	if (wbc->sync_mode == WB_SYNC_NONE)
		nonblocking = 1;

	do {
		int new_ioend = 0;

		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			imap_valid = 0;
			continue;
		}

		if (buffer_unwritten(bh)) {
			if (type != XFS_IO_UNWRITTEN) {
				type = XFS_IO_UNWRITTEN;
				imap_valid = 0;
			}
		} else if (buffer_delay(bh)) {
			if (type != XFS_IO_DELALLOC) {
				type = XFS_IO_DELALLOC;
				imap_valid = 0;
			}
		} else if (buffer_uptodate(bh)) {
			if (type != XFS_IO_OVERWRITE) {
				type = XFS_IO_OVERWRITE;
				imap_valid = 0;
			}
		} else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk.  Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */
			imap_valid = 0;
			continue;
		}

		if (imap_valid)
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		if (!imap_valid) {
			/*
			 * If we didn't have a valid mapping then we need to
			 * put the new mapping into a separate ioend structure.
			 * This ensures non-contiguous extents always have
			 * separate ioends, which is particularly important
			 * for unwritten extent conversion at I/O completion
			 * time.
			 */
			new_ioend = 1;
			err = xfs_map_blocks(inode, offset, &imap, type,
					     nonblocking);
			if (err)
				goto error;
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		}
		if (imap_valid) {
			lock_buffer(bh);
			if (type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
					 new_ioend);
			count++;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	xfs_start_page_writeback(page, 1, count);

	/* if there is no IO to be submitted for this page, we are done */
	if (!ioend)
		return 0;

	ASSERT(iohead);

	/*
	 * Any errors from this point onwards need to be reported through the
	 * IO completion path as we have marked the initial page as under
	 * writeback and unlocked it.
	 */
	if (imap_valid) {
		xfs_off_t		end_index;

		end_index = imap.br_startoff + imap.br_blockcount;

		/* to bytes */
		end_index <<= inode->i_blkbits;

		/* to pages */
		end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;

		/* check against file size */
		if (end_index > last_index)
			end_index = last_index;

		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
				  wbc, end_index);
	}

	/*
	 * Reserve log space if we might write beyond the on-disk inode size.
	 */
	err = 0;
	if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
		err = xfs_setfilesize_trans_alloc(ioend);

	xfs_submit_ioend(wbc, iohead, err);

	return 0;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	if (err == -EAGAIN)
		goto redirty;

	xfs_aops_discard_page(page);
	ClearPageUptodate(page);
	unlock_page(page);
	return err;

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

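/*
 * ->writepages: clear the XFS_ITRUNCATED flag on the inode (set by
 * truncate, and presumably cleared here so stale state does not linger),
 * then hand off to generic_writepages(), which invokes our ->writepage
 * for each dirty page.
 */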
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. The page should already be clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0, 0);

	xfs_count_page_state(page, &delalloc, &unwritten);

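	/*
	 * A clean page must not carry delalloc or unwritten buffers; if it
	 * does, something has gone wrong further up the stack, so warn and
	 * refuse to free the buffers.
	 */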
	if (WARN_ON(delalloc))
		return 0;
	if (WARN_ON(unwritten))
		return 0;

	return try_to_free_buffers(page);
}

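/*
 * Map the range starting at iblock to a disk extent for buffered or direct
 * I/O.  For writes into a hole or delalloc extent, direct I/O (and files
 * with an extent size hint) allocate real blocks here via a transaction,
 * while plain buffered writes only take out a delayed allocation
 * reservation.
 */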
STATIC int
__xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create,
	int			direct)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;
	int			new = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (!create && direct && offset >= i_size_read(inode))
		return 0;

	/*
	 * Direct I/O is usually done on preallocated files, so try getting
	 * a block mapping without an exclusive lock first.  For buffered
	 * writes we already have the exclusive iolock anyway, so avoiding
	 * a lock roundtrip here by taking the ilock exclusive from the
	 * beginning is a useful micro optimization.
	 */
	if (create && !direct) {
		lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, lockmode);
	} else {
		lockmode = xfs_ilock_map_shared(ip);
	}

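	/*
	 * Clamp the mapping request so it cannot extend beyond the largest
	 * file offset this mount supports.
	 */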
	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset + size > mp->m_super->s_maxbytes)
		size = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

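	/*
	 * Look up the current extent map for the range.  xfs_bmapi_read()
	 * never allocates; any allocation needed for a write is done
	 * separately below.
	 */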
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				&imap, &nimaps, XFS_BMAPI_ENTIRE);
	if (error)
		goto out_unlock;

	if (create &&
	    (!nimaps ||
	     (imap.br_startblock == HOLESTARTBLOCK ||
	      imap.br_startblock == DELAYSTARTBLOCK))) {
		if (direct || xfs_get_extsz_hint(ip)) {
			/*
			 * Drop the ilock in preparation for starting the block
			 * allocation transaction.  It will be retaken
			 * exclusively inside xfs_iomap_write_direct for the
			 * actual allocation.
			 */
			xfs_iunlock(ip, lockmode);
			error = xfs_iomap_write_direct(ip, offset, size,
						       &imap, nimaps);
			if (error)
				return -error;
			new = 1;
		} else {
			/*
			 * Delalloc reservations do not require a transaction,
			 * so we can go on without dropping the lock here.  If
			 * we are allocating a new delalloc block, make sure
			 * that we set the new flag so the buffer is marked
			 * new and we know it is newly allocated if the write
			 * fails.
			 */
			if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
				new = 1;
			error = xfs_iomap_write_delay(ip, offset, size, &imap);
			if (error)
				goto out_unlock;

			xfs_iunlock(ip, lockmode);
		}

		trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
	} else if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
		xfs_iunlock(ip, lockmode);
	} else {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}

	if (imap.br_startblock != HOLESTARTBLOCK &&
	    imap.br_startblock != DELAYSTARTBLOCK) {
		/*
		 * For unwritten extents do not report a disk address in
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !ISUNWRITTEN(&imap))
			xfs_map_buffer(inode, bh_result, &imap, offset);
		if (create && ISUNWRITTEN(&imap)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);

	/*
	 * If we previously allocated a block out beyond EOF and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (new || ISUNWRITTEN(&imap))))
		set_buffer_new(bh_result);

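	/*
	 * Delayed allocation extents do not have a real disk address yet,
	 * so direct I/O must never see one here.  For a buffered write,
	 * flag the buffer mapped and delayed so that writeback knows to
	 * allocate real blocks for it later.
	 */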
	if (imap.br_startblock == DELAYSTARTBLOCK) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	/*
	 * If this is O_DIRECT or the mpage code calling, tell them how large
	 * the mapping is so that we can avoid repeated get_blocks calls.
	 */
	if (direct || size > (1 << inode->i_blkbits)) {
		xfs_off_t	mapping_size;

		mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
		mapping_size <<= inode->i_blkbits;

		ASSERT(mapping_size > 0);
		if (mapping_size > size)
			mapping_size = size;
		if (mapping_size > LONG_MAX)
			mapping_size = LONG_MAX;

		bh_result->b_size = mapping_size;
	}

	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return -error;
}

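/*
 * The buffered and direct I/O flavours of the get_blocks callback are thin
 * wrappers that differ only in the direct flag they pass down.
 */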
int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
}

STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
}

/*
 * Complete a direct I/O write request.
 *
 * If the private argument is non-NULL __xfs_get_blocks signals us that we
 * need to issue a transaction to convert the range from unwritten to written
 * extents.  In case this is regular synchronous I/O we just call xfs_end_io
 * to do this and we are done.  But in case this was a successful AIO
 * request this handler is called from interrupt context, from which we
 * can't start transactions.  In that case offload the I/O completion to
 * the workqueues we also use for buffered I/O completion.
 */
STATIC void
xfs_end_io_direct_write(
	struct kiocb		*iocb,
	loff_t			offset,
	ssize_t			size,
	void			*private,
	int			ret,
	bool			is_async)
{
	struct xfs_ioend	*ioend = iocb->private;

	/*
	 * While the generic direct I/O code updates the inode size, it does
	 * so only after the end_io handler is called, which means our
	 * end_io handler thinks the on-disk size is outside the in-core
	 * size.  To prevent this just update it a little bit earlier here.
	 */
	if (offset + size > i_size_read(ioend->io_inode))
		i_size_write(ioend->io_inode, offset + size);

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;

	ioend->io_offset = offset;
	ioend->io_size = size;
	ioend->io_iocb = iocb;
	ioend->io_result = ret;
	if (private && size > 0)
		ioend->io_type = XFS_IO_UNWRITTEN;

	if (is_async) {
		ioend->io_isasync = 1;
		xfs_finish_ioend(ioend);
	} else {
		xfs_finish_ioend_sync(ioend);
	}
}

STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
	struct xfs_ioend	*ioend = NULL;
	ssize_t			ret;

	if (rw & WRITE) {
		size_t size = iov_length(iov, nr_segs);

		/*
		 * We cannot preallocate a size update transaction here as we
		 * don't know whether allocation is necessary or not. Hence we
		 * can only tell IO completion that one is necessary if we are
		 * not doing unwritten extent conversion.
		 */
		iocb->private = ioend = xfs_alloc_ioend(inode, XFS_IO_DIRECT);
		if (offset + size > XFS_I(inode)->i_d.di_size)
			ioend->io_isdirect = 1;

		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					   offset, nr_segs,
					   xfs_get_blocks_direct,
					   xfs_end_io_direct_write, NULL, 0);
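		/*
		 * If the request was not queued for async completion and the
		 * completion handler did not run (iocb->private is still
		 * set), free the ioend here so it is not leaked.
		 */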
		if (ret != -EIOCBQUEUED && iocb->private)
			goto out_destroy_ioend;
	} else {
		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					   offset, nr_segs,
					   xfs_get_blocks_direct,
					   NULL, NULL, 0);
	}

	return ret;

out_destroy_ioend:
	xfs_destroy_ioend(ioend);
	return ret;
}

/*
 * Punch out the delalloc blocks we have already allocated.
 *
 * Don't bother with xfs_setattr given that nothing can have made it to disk
 * yet as the page is still locked at this point.
 */
STATIC void
xfs_vm_kill_delalloc_range(
	struct inode		*inode,
	loff_t			start,
	loff_t			end)
{
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error;

	start_fsb = XFS_B_TO_FSB(ip->i_mount, start);
	end_fsb = XFS_B_TO_FSB(ip->i_mount, end);
	if (end_fsb <= start_fsb)
		return;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
						end_fsb - start_fsb);
	if (error) {
		/* something screwed, just bail */
		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
			xfs_alert(ip->i_mount,
		"xfs_vm_write_failed: unable to clean up ino %lld",
					ip->i_ino);
		}
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

STATIC void
xfs_vm_write_failed(
	struct inode		*inode,
	struct page		*page,
	loff_t			pos,
	unsigned		len)
{
	loff_t			block_offset;
	loff_t			block_start;
	loff_t			block_end;
	loff_t			from = pos & (PAGE_CACHE_SIZE - 1);
	loff_t			to = from + len;
	struct buffer_head	*bh, *head;

	/*
	 * The request pos offset might be 32 or 64 bit, this is all fine
	 * on a 64-bit platform.  However, for a 64-bit pos request on a
	 * 32-bit platform, the high 32 bits will be masked off if we
	 * evaluate the block_offset via (pos & PAGE_MASK) because
	 * PAGE_MASK is 0xfffff000 as an unsigned long, hence the result
	 * is incorrect and could cause the following ASSERT to fail in
	 * most cases.  To avoid the mismatch problem, we evaluate the
	 * block_offset of the start of the page by using shifts rather
	 * than masks.
	 */
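	/*
	 * For example, with a 4k page size and pos = 0x100001000,
	 * (pos & PAGE_MASK) on a 32-bit platform yields 0x1000, while
	 * (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT yields the
	 * correct 0x100001000.
	 */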
	block_offset = (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT;

	ASSERT(block_offset + from == pos);

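	/*
	 * Walk all buffers on the page and punch out the delalloc
	 * reservation behind any buffer in the failed range that is newly
	 * allocated or lies beyond the in-core inode size.
	 */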
	head = page_buffers(page);
	block_start = 0;
	for (bh = head; bh != head || !block_start;
	     bh = bh->b_this_page, block_start = block_end,
	     block_offset += bh->b_size) {
		block_end = block_start + bh->b_size;

		/* skip buffers before the write */
		if (block_end <= from)
			continue;

		/* if the buffer is after the write, we're done */
		if (block_start >= to)
			break;

		if (!buffer_delay(bh))
			continue;

		if (!buffer_new(bh) && block_offset < i_size_read(inode))
			continue;

		xfs_vm_kill_delalloc_range(inode, block_offset,
					   block_offset + bh->b_size);
	}
}

/*
 * This used to call block_write_begin(), but it unlocks and releases the
 * page on error, and we need that page to be able to punch stale delalloc
 * blocks out on failure.  Hence we copy-n-waste it here and call
 * xfs_vm_write_failed() at the appropriate point.
 */
STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	pgoff_t			index = pos >> PAGE_CACHE_SHIFT;
	struct page		*page;
	int			status;

	ASSERT(len <= PAGE_CACHE_SIZE);

	page = grab_cache_page_write_begin(mapping, index,
					   flags | AOP_FLAG_NOFS);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin(page, pos, len, xfs_get_blocks);
	if (unlikely(status)) {
		struct inode	*inode = mapping->host;

		xfs_vm_write_failed(inode, page, pos, len);
		unlock_page(page);

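		/*
		 * If the write ran beyond EOF, trim the page cache back to
		 * the current inode size so no stale pages are left past it.
		 */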
		if (pos + len > i_size_read(inode))
			truncate_pagecache(inode, pos + len, i_size_read(inode));

		page_cache_release(page);
		page = NULL;
	}

	*pagep = page;
	return status;
}

/*
 * On failure, we only need to kill delalloc blocks beyond EOF because they
 * will never be written.  For blocks within EOF, generic_write_end() zeros
 * them so they are safe to leave alone and be written with all the other
 * valid data.
 */
STATIC int
xfs_vm_write_end(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		copied,
	struct page		*page,
	void			*fsdata)
{
	int			ret;

	ASSERT(len <= PAGE_CACHE_SIZE);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
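	/*
	 * A short copy means part of the write never made it into the page;
	 * trim the page cache and punch out any delalloc blocks that were
	 * reserved beyond the new EOF.
	 */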
	if (unlikely(ret < len)) {
		struct inode	*inode = mapping->host;
		size_t		isize = i_size_read(inode);
		loff_t		to = pos + len;

		if (to > isize) {
			truncate_pagecache(inode, to, isize);
			xfs_vm_kill_delalloc_range(inode, isize, to);
		}
	}
	return ret;
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	filemap_write_and_wait(mapping);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

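/*
 * The read paths delegate to the generic mpage code, which builds large
 * bios from the block mappings returned by xfs_get_blocks().
 */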
STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= xfs_vm_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};