/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"

/**
 * gfs2_ail_empty_gl - remove all buffers for a given lock from the AIL
 * @gl: the glock
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int blocks;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;
	u64 blkno;
	int error;

	blocks = atomic_read(&gl->gl_ail_count);
	if (!blocks)
		return;

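	/* Open a transaction with no metadata blocks, but with room for
	   one revoke per buffer currently sitting on this glock's AIL. */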
	error = gfs2_trans_begin(sdp, 0, blocks);
	if (gfs2_assert_withdraw(sdp, !error))
		return;

	gfs2_log_lock(sdp);
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata,
				bd_ail_gl_list);
		bh = bd->bd_bh;
		blkno = bh->b_blocknr;
		gfs2_assert_withdraw(sdp, !buffer_busy(bh));

		bd->bd_ail = NULL;
		list_del(&bd->bd_ail_st_list);
		list_del(&bd->bd_ail_gl_list);
		atomic_dec(&gl->gl_ail_count);
		brelse(bh);
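		/* The log lock is dropped across gfs2_trans_add_revoke()
		   and retaken before the list is examined again. */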
		gfs2_log_unlock(sdp);

		gfs2_trans_add_revoke(sdp, blkno);

		gfs2_log_lock(sdp);
	}
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	gfs2_log_unlock(sdp);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}

/**
 * gfs2_pte_inval - Sync and invalidate all PTEs associated with a glock
 * @gl: the glock
 *
 */

static void gfs2_pte_inval(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;
	struct inode *inode;

	ip = gl->gl_object;
	if (!ip || !S_ISREG(ip->i_di.di_mode))
		return;
	inode = &ip->i_inode;

	if (!test_bit(GIF_PAGED, &ip->i_flags))
		return;

	unmap_shared_mapping_range(inode->i_mapping, 0, 0);

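	/* A shared writable mapping may have dirtied pages (GIF_SW_PAGED),
	   so mark the glock dirty to make sure the data gets synced. */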
	if (test_bit(GIF_SW_PAGED, &ip->i_flags))
		set_bit(GLF_DIRTY, &gl->gl_flags);

	clear_bit(GIF_SW_PAGED, &ip->i_flags);
}

/**
 * gfs2_page_inval - Invalidate all pages associated with a glock
 * @gl: the glock
 *
 */

static void gfs2_page_inval(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;
	struct inode *inode;

	ip = gl->gl_object;
	if (!ip || !S_ISREG(ip->i_di.di_mode))
		return;
	inode = &ip->i_inode;

	truncate_inode_pages(inode->i_mapping, 0);
	gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), !inode->i_mapping->nrpages);
	clear_bit(GIF_PAGED, &ip->i_flags);
}

/**
 * gfs2_page_wait - Wait for writeback of data
 * @gl: the glock
 *
 * Waits for in-flight data (not metadata) writeback on a regular file.
 * No-op for all other inode types.
 */

static void gfs2_page_wait(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode = &ip->i_inode;
	struct address_space *mapping = inode->i_mapping;
	int error;

	if (!S_ISREG(ip->i_di.di_mode))
		return;

	error = filemap_fdatawait(mapping);

	/* Put back any errors cleared by filemap_fdatawait()
	   so they can be caught by someone who can pass them
	   up to user space. */

	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else if (error)
		set_bit(AS_EIO, &mapping->flags);
}

static void gfs2_page_writeback(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode = &ip->i_inode;
	struct address_space *mapping = inode->i_mapping;

	if (!S_ISREG(ip->i_di.di_mode))
		return;

	filemap_fdatawrite(mapping);
}

/**
 * meta_go_sync - sync out the metadata for this glock
 * @gl: the glock
 * @flags: DIO_*
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must
 * not return to the caller to demote/unlock the glock until the I/O
 * is complete.
 */

static void meta_go_sync(struct gfs2_glock *gl, int flags)
{
	if (!(flags & DIO_METADATA))
		return;

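	/* Flush the journal for this glock before syncing its metadata
	   in place (write-ahead ordering), and drain this glock's AIL
	   entries when it is being released. */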
	if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) {
		gfs2_log_flush(gl->gl_sbd, gl);
		gfs2_meta_sync(gl);
		if (flags & DIO_RELEASE)
			gfs2_ail_empty_gl(gl);
	}
}

/**
 * meta_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: DIO_* flags
 *
 */

static void meta_go_inval(struct gfs2_glock *gl, int flags)
{
	if (!(flags & DIO_METADATA))
		return;

	gfs2_meta_inval(gl);
	gl->gl_vn++;
}

/**
 * inode_go_xmote_th - promote/demote a glock
 * @gl: the glock
 * @state: the requested state
 * @flags:
 *
 */

static void inode_go_xmote_th(struct gfs2_glock *gl, unsigned int state,
			      int flags)
{
	if (gl->gl_state != LM_ST_UNLOCKED)
		gfs2_pte_inval(gl);
	gfs2_glock_xmote_th(gl, state, flags);
}

/**
 * inode_go_xmote_bh - After promoting/demoting a glock
 * @gl: the glock
 *
 */

static void inode_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh = gl->gl_req_gh;
	struct buffer_head *bh;
	int error;

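	/* Once the lock is held, pre-read the lock's disk block so it is
	   cached, unless the waiting holder asked for GL_SKIP. */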
	if (gl->gl_state != LM_ST_UNLOCKED &&
	    (!gh || !(gh->gh_flags & GL_SKIP))) {
		error = gfs2_meta_read(gl, gl->gl_name.ln_number, 0, &bh);
		if (!error)
			brelse(bh);
	}
}

/**
 * inode_go_drop_th - unlock a glock
 * @gl: the glock
 *
 * Invoked from rq_demote().
 * Either another node needs the lock in EXCLUSIVE mode, or the lock
 * (unused for too long) is being purged from our node's glock cache;
 * we're dropping the lock.
 */

static void inode_go_drop_th(struct gfs2_glock *gl)
{
	gfs2_pte_inval(gl);
	gfs2_glock_drop_th(gl);
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 * @flags: DIO_* flags
 *
 */

static void inode_go_sync(struct gfs2_glock *gl, int flags)
{
	int meta = (flags & DIO_METADATA);
	int data = (flags & DIO_DATA);

	if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
		if (meta && data) {
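			/* Start data writeback, flush the log and sync the
			   metadata, then wait for the data I/O to finish. */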
			gfs2_page_writeback(gl);
			gfs2_log_flush(gl->gl_sbd, gl);
			gfs2_meta_sync(gl);
			gfs2_page_wait(gl);
			clear_bit(GLF_DIRTY, &gl->gl_flags);
		} else if (meta) {
			gfs2_log_flush(gl->gl_sbd, gl);
			gfs2_meta_sync(gl);
		} else if (data) {
			gfs2_page_writeback(gl);
			gfs2_page_wait(gl);
		}
		if (flags & DIO_RELEASE)
			gfs2_ail_empty_gl(gl);
	}
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags: DIO_* flags
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	int meta = (flags & DIO_METADATA);
	int data = (flags & DIO_DATA);

	if (meta) {
		gfs2_meta_inval(gl);
		gl->gl_vn++;
	}
	if (data)
		gfs2_page_inval(gl);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int demote = 0;

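	/* Demote if no inode is attached and no pages are cached, or if
	   local caching is off and the demote timeout has expired. */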
	if (!gl->gl_object && !gl->gl_aspace->i_mapping->nrpages)
		demote = 1;
	else if (!sdp->sd_args.ar_localcaching &&
		 time_after_eq(jiffies, gl->gl_stamp +
			       gfs2_tune_get(sdp, gt_demote_secs) * HZ))
		demote = 1;

	return demote;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip)
		return 0;

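	/* Re-read the dinode if the glock's version number changed while
	   the lock was not held, then propagate the attributes to the
	   VFS inode. */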
	if (ip->i_vn != gl->gl_vn) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
		gfs2_inode_attr_in(ip);
	}

	if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_flags & GL_LOCAL_EXCL))
		error = gfs2_truncatei_resume(ip);

	return error;
}

/**
 * inode_go_unlock - operation done before an inode lock is unlocked by a
 *		     process
 * @gh: the holder
 *
 */

static void inode_go_unlock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;

	if (ip == NULL)
		return;
	if (test_bit(GLF_DIRTY, &gl->gl_flags))
		gfs2_inode_attr_in(ip);
	gfs2_meta_cache_flush(ip);
}

/**
 * inode_greedy - adjust the inode's greedy-grant period
 * @gl: the glock
 *
 */

static void inode_greedy(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	unsigned int quantum = gfs2_tune_get(sdp, gt_greedy_quantum);
	unsigned int max = gfs2_tune_get(sdp, gt_greedy_max);
	unsigned int new_time;

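	/* Grow the greedy period (capped at gt_greedy_max) while page
	   faults keep arriving within a quantum; otherwise shrink it by
	   a quantum, never below one. */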
	spin_lock(&ip->i_spin);

	if (time_after(ip->i_last_pfault + quantum, jiffies)) {
		new_time = ip->i_greedy + quantum;
		if (new_time > max)
			new_time = max;
	} else {
		new_time = ip->i_greedy - quantum;
		if (!new_time || new_time > max)
			new_time = 1;
	}

	ip->i_greedy = new_time;

	spin_unlock(&ip->i_spin);

	iput(&ip->i_inode);
}

/**
 * rgrp_go_demote_ok - Check to see if it's ok to unlock an RG's glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int rgrp_go_demote_ok(struct gfs2_glock *gl)
{
	return !gl->gl_aspace->i_mapping->nrpages;
}

/**
 * rgrp_go_lock - operation done after an rgrp lock is locked by
 *    a first holder on this node.
 * @gh: the holder
 *
 * Returns: errno
 */

static int rgrp_go_lock(struct gfs2_holder *gh)
{
	return gfs2_rgrp_bh_get(gh->gh_gl->gl_object);
}

/**
 * rgrp_go_unlock - operation done before an rgrp lock is unlocked by
 *    a last holder on this node.
 * @gh: the holder
 *
 */

static void rgrp_go_unlock(struct gfs2_holder *gh)
{
	gfs2_rgrp_bh_put(gh->gh_gl->gl_object);
}

/**
 * trans_go_xmote_th - promote/demote the transaction glock
 * @gl: the glock
 * @state: the requested state
 * @flags:
 *
 */

static void trans_go_xmote_th(struct gfs2_glock *gl, unsigned int state,
			      int flags)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

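	/* While the transaction glock is held and the journal is live,
	   sync everything out and shut the journal down cleanly before
	   the state change. */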
	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}

	gfs2_glock_xmote_th(gl, state, flags);
}

/**
 * trans_go_xmote_bh - After promoting/demoting the transaction glock
 * @gl: the glock
 *
 */

static void trans_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header head;
	int error;

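	/* After the transaction glock is reacquired, invalidate the cached
	   journal metadata, verify that the journal head shows a clean
	   (unmounted) log, and reinitialise the log pointers from it. */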
	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_cache_flush(GFS2_I(sdp->sd_jdesc->jd_inode));
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA | DIO_DATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/*  Initialize the head of the log  */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
}

/**
 * trans_go_drop_th - unlock the transaction glock
 * @gl: the glock
 *
 * We want to sync the device even with localcaching.  Remember
 * that localcaching journal replay only marks buffers dirty.
 */

static void trans_go_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}

	gfs2_glock_drop_th(gl);
}

/**
 * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int quota_go_demote_ok(struct gfs2_glock *gl)
{
	return !atomic_read(&gl->gl_lvb_count);
}

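/* Per-lock-type glock operation tables. */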
const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_xmote_th = inode_go_xmote_th,
	.go_xmote_bh = inode_go_xmote_bh,
	.go_drop_th = inode_go_drop_th,
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_unlock = inode_go_unlock,
	.go_greedy = inode_greedy,
	.go_type = LM_TYPE_INODE,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_sync = meta_go_sync,
	.go_inval = meta_go_inval,
	.go_demote_ok = rgrp_go_demote_ok,
	.go_lock = rgrp_go_lock,
	.go_unlock = rgrp_go_unlock,
	.go_type = LM_TYPE_RGRP,
};

const struct gfs2_glock_operations gfs2_trans_glops = {
	.go_xmote_th = trans_go_xmote_th,
	.go_xmote_bh = trans_go_xmote_bh,
	.go_drop_th = trans_go_drop_th,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_IOPEN,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_demote_ok = quota_go_demote_ok,
	.go_type = LM_TYPE_QUOTA,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_JOURNAL,
};