/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/lm_interface.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

struct gfs2_gl_hash_bucket {
	struct hlist_head hb_list;
};

struct glock_iter {
	int hash;                     /* hash bucket index         */
	struct gfs2_sbd *sdp;         /* incore superblock         */
	struct gfs2_glock *gl;        /* current glock struct      */
	struct hlist_head *hb_list;   /* current hash bucket ptr   */
	struct seq_file *seq;         /* sequence file for debugfs */
	char string[512];             /* scratch space             */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
static void gfs2_glock_drop_th(struct gfs2_glock *gl);
static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];

/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
    defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ       4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ       2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ       1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ       512
# else
#  define GL_HASH_LOCK_SZ       256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return NULL;
}
#endif

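/*
 * Each hash chain shares one of the GL_HASH_LOCK_SZ rwlocks above,
 * selected by masking the bucket index: bucket "hash" is protected by
 * gl_lock_addr(hash), i.e. gl_hash_locks[hash & (GL_HASH_LOCK_SZ - 1)].
 * Both sizes are powers of two, so consecutive buckets are spread
 * across different locks.
 */
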
/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
				   int flags)
{
	if (actual == requested)
		return 1;

	if (flags & GL_EXACT)
		return 0;

	if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
		return 1;

	if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
		return 1;

	return 0;
}

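/*
 * For example, a holder asking for LM_ST_SHARED is satisfied by a glock
 * already held in LM_ST_EXCLUSIVE unless it passed GL_EXACT, and a holder
 * passing LM_FLAG_ANY is satisfied by any state except LM_ST_UNLOCKED.
 */
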
/**
 * gl_hash() - Turn glock number into hash bucket number
 * @lock: The glock number
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;

	gfs2_lm_put_lock(sdp, gl->gl_lock);

	if (aspace)
		gfs2_aspace_put(aspace);

	kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	atomic_inc(&gl->gl_ref);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
	int rv = 0;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	write_lock(gl_lock_addr(gl->gl_hash));
	if (atomic_dec_and_test(&gl->gl_ref)) {
		hlist_del(&gl->gl_list);
		write_unlock(gl_lock_addr(gl->gl_hash));
		BUG_ON(spin_is_locked(&gl->gl_spin));
		gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
		gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
		gfs2_assert(sdp, list_empty(&gl->gl_holders));
		gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
		gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
		glock_free(gl);
		rv = 1;
		goto out;
	}
	write_unlock(gl_lock_addr(gl->gl_hash));
out:
	return rv;
}

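/*
 * The reference count is dropped under the bucket lock so that the final
 * put can unhash the glock atomically with freeing it; anyone who finds a
 * glock in the hash table therefore also gets a valid reference, because
 * search_bucket() below takes one before the bucket lock is released.
 */
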
/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @bucket: the bucket to search
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_node *h;

	hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;

		atomic_inc(&gl->gl_ref);

		return gl;
	}

	return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
					  const struct lm_lockname *name)
{
	unsigned int hash = gl_hash(sdp, name);
	struct gfs2_glock *gl;

	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, name);
	read_unlock(gl_lock_addr(hash));

	return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	int error;

	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, &name);
	read_unlock(gl_lock_addr(hash));

	if (gl || !create) {
		*glp = gl;
		return 0;
	}

	gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_hash = hash;
	gl->gl_owner = NULL;
	gl->gl_ip = 0;
	gl->gl_ops = glops;
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	gl->gl_vn = 0;
	gl->gl_stamp = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	gl->gl_aspace = NULL;
	lops_init_le(&gl->gl_le, &gfs2_glock_lops);

	/* If this glock protects actual on-disk data or metadata blocks,
	   create a VFS inode to manage the pages/buffers holding them. */
	if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
		gl->gl_aspace = gfs2_aspace_get(sdp);
		if (!gl->gl_aspace) {
			error = -ENOMEM;
			goto fail;
		}
	}

	error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
	if (error)
		goto fail_aspace;

	write_lock(gl_lock_addr(hash));
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		write_unlock(gl_lock_addr(hash));
		glock_free(gl);
		gl = tmp;
	} else {
		hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
		write_unlock(gl_lock_addr(hash));
	}

	*glp = gl;

	return 0;

fail_aspace:
	if (gl->gl_aspace)
		gfs2_aspace_put(gl->gl_aspace);
fail:
	kmem_cache_free(gfs2_glock_cachep, gl);
	return error;
}

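/*
 * Typical use (sketch only): look up or create the glock, queue a holder
 * on it, and drop both when done, e.g.
 *
 *	error = gfs2_glock_get(sdp, number, &gfs2_inode_glops, CREATE, &gl);
 *	if (!error) {
 *		error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
 *		...
 *		gfs2_glock_put(gl);
 *	}
 *
 * gfs2_glock_nq_init() is the glock.h helper that combines
 * gfs2_holder_init() and gfs2_glock_nq(); see gfs2_glock_nq_num() below
 * for the same pattern in this file.
 */
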
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner = current;
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

static int holder_wait(void *word)
{
	schedule();
	return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, holder_wait, TASK_UNINTERRUPTIBLE);
}

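/*
 * A holder sleeps on the HIF_WAIT bit of gh_iflags: whoever grants (or
 * fails) the request clears the bit and calls wake_up_bit() via
 * gfs2_holder_wake() above; the smp_mb() orders the clear before the
 * wakeup so the sleeper in wait_on_holder() cannot miss it.
 */
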
/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/* gh->gh_error never examined. */
	set_bit(GLF_LOCK, &gl->gl_flags);
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);

	return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		if (list_empty(&gl->gl_holders)) {
			gl->gl_req_gh = gh;
			set_bit(GLF_LOCK, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);

			if (atomic_read(&sdp->sd_reclaim_count) >
			    gfs2_tune_get(sdp, gt_reclaim_limit) &&
			    !(gh->gh_flags & LM_FLAG_PRIORITY)) {
				gfs2_reclaim_glock(sdp);
				gfs2_reclaim_glock(sdp);
			}

			gfs2_glock_xmote_th(gh->gh_gl, gh);
			spin_lock(&gl->gl_spin);
		}
		return 1;
	}

	if (list_empty(&gl->gl_holders)) {
		set_bit(HIF_FIRST, &gh->gh_iflags);
		set_bit(GLF_LOCK, &gl->gl_flags);
	} else {
		struct gfs2_holder *next_gh;
		if (gh->gh_state == LM_ST_EXCLUSIVE)
			return 1;
		next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
				     gh_list);
		if (next_gh->gh_state == LM_ST_EXCLUSIVE)
			return 1;
	}

	list_move_tail(&gh->gh_list, &gl->gl_holders);
	gh->gh_error = 0;
	set_bit(HIF_HOLDER, &gh->gh_iflags);

	gfs2_holder_wake(gh);

	return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gl: the glock
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_glock *gl)
{
	if (!list_empty(&gl->gl_holders))
		return 1;

	if (gl->gl_state == gl->gl_demote_state ||
	    gl->gl_state == LM_ST_UNLOCKED) {
		clear_bit(GLF_DEMOTE, &gl->gl_flags);
		return 0;
	}
	set_bit(GLF_LOCK, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);
	if (gl->gl_demote_state == LM_ST_UNLOCKED ||
	    gl->gl_state != LM_ST_EXCLUSIVE)
		gfs2_glock_drop_th(gl);
	else
		gfs2_glock_xmote_th(gl, NULL);
	spin_lock(&gl->gl_spin);

	return 0;
}

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 */
static void run_queue(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	int blocked = 1;

	for (;;) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			break;

		if (!list_empty(&gl->gl_waiters1)) {
			gh = list_entry(gl->gl_waiters1.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_MUTEX, &gh->gh_iflags))
				blocked = rq_mutex(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
			blocked = rq_demote(gl);
		} else if (!list_empty(&gl->gl_waiters3)) {
			gh = list_entry(gl->gl_waiters3.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
				blocked = rq_promote(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else
			break;

		if (blocked)
			break;
	}
}

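/*
 * run_queue() drains requests in priority order: glmutex requests on
 * gl_waiters1 first, then a pending remote demote (GLF_DEMOTE), then
 * promote requests on gl_waiters3, stopping as soon as one of them
 * reports the queue as blocked or GLF_LOCK is already held.
 */
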
/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;

	gfs2_holder_init(gl, 0, 0, &gh);
	set_bit(HIF_MUTEX, &gh.gh_iflags);
	if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
		BUG();

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
		list_add_tail(&gh.gh_list, &gl->gl_waiters1);
	} else {
		gl->gl_owner = current;
		gl->gl_ip = (unsigned long)__builtin_return_address(0);
		clear_bit(HIF_WAIT, &gh.gh_iflags);
		smp_mb();
		wake_up_bit(&gh.gh_iflags, HIF_WAIT);
	}
	spin_unlock(&gl->gl_spin);

	wait_on_holder(&gh);
	gfs2_holder_uninit(&gh);
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
	int acquired = 1;

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
		acquired = 0;
	} else {
		gl->gl_owner = current;
		gl->gl_ip = (unsigned long)__builtin_return_address(0);
	}
	spin_unlock(&gl->gl_spin);

	return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */

static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	gl->gl_owner = NULL;
	gl->gl_ip = 0;
	run_queue(gl);
	BUG_ON(!spin_is_locked(&gl->gl_spin));
	spin_unlock(&gl->gl_spin);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_DEMOTE, &gl->gl_flags) == 0) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED) {
		gl->gl_demote_state = state;
	}
	spin_unlock(&gl->gl_spin);
}

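/*
 * A second callback that arrives while GLF_DEMOTE is still set only
 * overwrites gl_demote_state if the pending demote is not already to
 * LM_ST_UNLOCKED, i.e. a request to drop the lock completely is never
 * weakened into a demote to a lesser state.
 */
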
/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put(gl);
	}

	gl->gl_state = new_state;
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;
	int prev_state = gl->gl_state;
	int op_done = 1;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

	state_change(gl, ret & LM_OUT_ST_MASK);

	if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
		if (glops->go_inval)
			glops->go_inval(gl, DIO_METADATA);
	} else if (gl->gl_state == LM_ST_DEFERRED) {
		/* We might not want to do this here.
		   Look at moving to the inode glops. */
		if (glops->go_inval)
			glops->go_inval(gl, 0);
	}

	/* Deal with each possible exit condition */

	if (!gh) {
		gl->gl_stamp = jiffies;
		if (ret & LM_OUT_CANCELED)
			op_done = 0;
		else
			clear_bit(GLF_DEMOTE, &gl->gl_flags);
	} else {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = -EIO;
		if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
			goto out;
		gh->gh_error = GLR_CANCELED;
		if (ret & LM_OUT_CANCELED)
			goto out;
		if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
			list_add_tail(&gh->gh_list, &gl->gl_holders);
			gh->gh_error = 0;
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			set_bit(HIF_FIRST, &gh->gh_iflags);
			op_done = 0;
			goto out;
		}
		gh->gh_error = GLR_TRYFAILED;
		if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			goto out;
		gh->gh_error = -EINVAL;
		if (gfs2_assert_withdraw(sdp, 0) == -1)
			fs_err(sdp, "ret = 0x%.8X\n", ret);
out:
		spin_unlock(&gl->gl_spin);
	}

	if (glops->go_xmote_bh)
		glops->go_xmote_bh(gl);

	if (op_done) {
		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	gfs2_glock_put(gl);

	if (gh)
		gfs2_holder_wake(gh);
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @gh: the holder supplying the requested state and flags (NULL when
 *      changing state to the pending gl_demote_state)
 *
 */

void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int flags = gh ? gh->gh_flags : 0;
	unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
				 LM_FLAG_NOEXP | LM_FLAG_ANY |
				 LM_FLAG_PRIORITY);
	unsigned int lck_ret;

	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
	gfs2_assert_warn(sdp, state != gl->gl_state);

	gfs2_glock_hold(gl);
	gl->gl_req_bh = xmote_bh;

	lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);

	if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
		return;

	if (lck_ret & LM_OUT_ASYNC)
		gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
	else
		xmote_bh(gl, lck_ret);
}

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Wakes up the process waiting on the struct gfs2_holder (if any) and
 * drops the reference on the glock that the top half took out
 *
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, !ret);

	state_change(gl, LM_ST_UNLOCKED);
	clear_bit(GLF_DEMOTE, &gl->gl_flags);

	if (glops->go_inval)
		glops->go_inval(gl, DIO_METADATA);

	if (gh) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
	}

	spin_lock(&gl->gl_spin);
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);

	if (gh)
		gfs2_holder_wake(gh);
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */

static void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned int ret;

	if (glops->go_drop_th)
		glops->go_drop_th(gl);

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

	gfs2_glock_hold(gl);
	gl->gl_req_bh = drop_bh;

	ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

	if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
		return;

	if (!ret)
		drop_bh(gl, ret);
	else
		gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	spin_lock(&gl->gl_spin);

	while (gl->gl_req_gh != gh &&
	       !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
	       !list_empty(&gh->gh_list)) {
		if (gl->gl_req_bh && !(gl->gl_req_gh &&
				       (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
			spin_unlock(&gl->gl_spin);
			gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
			msleep(100);
			spin_lock(&gl->gl_spin);
		} else {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			spin_lock(&gl->gl_spin);
		}
	}

	spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (test_bit(HIF_ABORTED, &gh->gh_iflags))
		return -EIO;

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		if (gl->gl_req_gh != gh &&
		    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
		    !list_empty(&gh->gh_list)) {
			list_del_init(&gh->gh_list);
			gh->gh_error = GLR_TRYFAILED;
			run_queue(gl);
			spin_unlock(&gl->gl_spin);
			return gh->gh_error;
		}
		spin_unlock(&gl->gl_spin);
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		do_cancels(gh);

	wait_on_holder(gh);
	if (gh->gh_error)
		return gh->gh_error;

	gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
	gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
						   gh->gh_flags));

	if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
		gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

		if (glops->go_lock) {
			gh->gh_error = glops->go_lock(gh);
			if (gh->gh_error) {
				spin_lock(&gl->gl_spin);
				list_del_init(&gh->gh_list);
				spin_unlock(&gl->gl_spin);
			}
		}

		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct task_struct *owner)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, head, gh_list) {
		if (gh->gh_owner == owner)
			return gh;
	}

	return NULL;
}

static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	if (gi) {
		vsprintf(gi->string, fmt, args);
		seq_printf(gi->seq, gi->string);
	}
	else
		vprintk(fmt, args);
	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */

static void add_to_queue(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_holder *existing;

	BUG_ON(!gh->gh_owner);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
	if (existing) {
		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
		printk(KERN_INFO "pid : %d\n", existing->gh_owner->pid);
		printk(KERN_INFO "lock type : %d lock state : %d\n",
		       existing->gh_gl->gl_name.ln_type, existing->gh_gl->gl_state);
		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
		printk(KERN_INFO "pid : %d\n", gh->gh_owner->pid);
		printk(KERN_INFO "lock type : %d lock state : %d\n",
		       gl->gl_name.ln_type, gl->gl_state);
		BUG();
	}

	existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
	if (existing) {
		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
		BUG();
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		list_add(&gh->gh_list, &gl->gl_waiters3);
	else
		list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

restart:
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		set_bit(HIF_ABORTED, &gh->gh_iflags);
		return -EIO;
	}

	set_bit(HIF_PROMOTE, &gh->gh_iflags);

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC)) {
		error = glock_wait_internal(gh);
		if (error == GLR_CANCELED) {
			msleep(100);
			goto restart;
		}
	}

	return error;
}

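/*
 * Typical synchronous usage (sketch only):
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (!error) {
 *		... access the protected object ...
 *		gfs2_glock_dq(&gh);
 *	}
 *	gfs2_holder_uninit(&gh);
 *
 * With GL_ASYNC set, gfs2_glock_nq() returns immediately and the caller
 * polls with gfs2_glock_poll() and completes with gfs2_glock_wait().
 */
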
/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	int ready = 0;

	spin_lock(&gl->gl_spin);

	if (test_bit(HIF_HOLDER, &gh->gh_iflags))
		ready = 1;
	else if (list_empty(&gh->gh_list)) {
		if (gh->gh_error == GLR_CANCELED) {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			if (gfs2_glock_nq(gh))
				return 1;
			return 0;
		} else
			ready = 1;
	}

	spin_unlock(&gl->gl_spin);

	return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	int error;

	error = glock_wait_internal(gh);
	if (error == GLR_CANCELED) {
		msleep(100);
		gh->gh_flags &= ~GL_ASYNC;
		error = gfs2_glock_nq(gh);
	}

	return error;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED);

	gfs2_glmutex_lock(gl);

	spin_lock(&gl->gl_spin);
	list_del_init(&gh->gh_list);

	if (list_empty(&gl->gl_holders)) {
		spin_unlock(&gl->gl_spin);

		if (glops->go_unlock)
			glops->go_unlock(gh);

		spin_lock(&gl->gl_spin);
		gl->gl_stamp = jiffies;
	}

	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

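/*
 * Holders are sorted by lock number (two holders with the same number
 * must be for different glock types, hence the BUG_ON) so that multiple
 * glocks are always acquired in one global order, which is what keeps
 * callers of gfs2_glock_nq_m() from deadlocking against each other.
 */
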
/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Figure out how big an impact this function has. Either:
 * 1) Replace this code with code that calls gfs2_glock_prefetch()
 * 2) Forget async stuff and just call nq_m_sync()
 * 3) Leave it like it is
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	int *e;
	unsigned int x;
	int borked = 0, serious = 0;
	int error = 0;

	if (!num_gh)
		return 0;

	if (num_gh == 1) {
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	}

	e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	for (x = 0; x < num_gh; x++) {
		ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
		error = gfs2_glock_nq(&ghs[x]);
		if (error) {
			borked = 1;
			serious = error;
			num_gh = x;
			break;
		}
	}

	for (x = 0; x < num_gh; x++) {
		error = e[x] = glock_wait_internal(&ghs[x]);
		if (error) {
			borked = 1;
			if (error != GLR_TRYFAILED && error != GLR_CANCELED)
				serious = error;
		}
	}

	if (!borked) {
		kfree(e);
		return 0;
	}

	for (x = 0; x < num_gh; x++)
		if (!e[x])
			gfs2_glock_dq(&ghs[x]);

	if (serious)
		error = serious;
	else {
		for (x = 0; x < num_gh; x++)
			gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
					   &ghs[x]);
		error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
	}

	kfree(e);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq_uninit(&ghs[x]);
}

/**
 * gfs2_lvb_hold - attach a LVB to a glock
 * @gl: The glock in question
 *
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
	int error;

	gfs2_glmutex_lock(gl);

	if (!atomic_read(&gl->gl_lvb_count)) {
		error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
		if (error) {
			gfs2_glmutex_unlock(gl);
			return error;
		}
		gfs2_glock_hold(gl);
	}
	atomic_inc(&gl->gl_lvb_count);

	gfs2_glmutex_unlock(gl);

	return 0;
}

/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 *
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
	gfs2_glock_hold(gl);
	gfs2_glmutex_lock(gl);

	gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
	if (atomic_dec_and_test(&gl->gl_lvb_count)) {
		gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
		gl->gl_lvb = NULL;
		gfs2_glock_put(gl);
	}

	gfs2_glmutex_unlock(gl);
	gfs2_glock_put(gl);
}

static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
			unsigned int state)
{
	struct gfs2_glock *gl;

	gl = gfs2_glock_find(sdp, name);
	if (!gl)
		return;

	handle_callback(gl, state);

	spin_lock(&gl->gl_spin);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @cb_data: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
{
	struct gfs2_sbd *sdp = cb_data;

	switch (type) {
	case LM_CB_NEED_E:
		blocking_cb(sdp, data, LM_ST_UNLOCKED);
		return;

	case LM_CB_NEED_D:
		blocking_cb(sdp, data, LM_ST_DEFERRED);
		return;

	case LM_CB_NEED_S:
		blocking_cb(sdp, data, LM_ST_SHARED);
		return;

	case LM_CB_ASYNC: {
		struct lm_async_cb *async = data;
		struct gfs2_glock *gl;

		down_read(&gfs2_umount_flush_sem);
		gl = gfs2_glock_find(sdp, &async->lc_name);
		if (gfs2_assert_warn(sdp, gl))
			return;
		if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
			gl->gl_req_bh(gl, async->lc_ret);
		gfs2_glock_put(gl);
		up_read(&gfs2_umount_flush_sem);
		return;
	}

	case LM_CB_NEED_RECOVERY:
		gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
		if (sdp->sd_recoverd_process)
			wake_up_process(sdp->sd_recoverd_process);
		return;

	case LM_CB_DROPLOCKS:
		gfs2_gl_hash_clear(sdp, NO_WAIT);
		gfs2_quota_scan(sdp);
		return;

	default:
		gfs2_assert_warn(sdp, 0);
		return;
	}
}

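/*
 * The LM_CB_NEED_* callbacks map directly onto demote requests: NEED_E
 * asks us to drop to LM_ST_UNLOCKED, NEED_D to LM_ST_DEFERRED and NEED_S
 * to LM_ST_SHARED, each handled by queueing the demote via blocking_cb()
 * and letting run_queue() perform the actual state transition.
 */
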
1524/**
David Teiglandb3b94fa2006-01-16 16:50:04 +00001525 * demote_ok - Check to see if it's ok to unlock a glock
1526 * @gl: the glock
1527 *
1528 * Returns: 1 if it's ok
1529 */
1530
1531static int demote_ok(struct gfs2_glock *gl)
1532{
Steven Whitehouse8fb4b532006-08-30 09:30:00 -04001533 const struct gfs2_glock_operations *glops = gl->gl_ops;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001534 int demote = 1;
1535
1536 if (test_bit(GLF_STICKY, &gl->gl_flags))
1537 demote = 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001538 else if (glops->go_demote_ok)
1539 demote = glops->go_demote_ok(gl);
1540
1541 return demote;
1542}
1543
1544/**
1545 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
1546 * @gl: the glock
1547 *
1548 */
1549
1550void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
1551{
1552 struct gfs2_sbd *sdp = gl->gl_sbd;
1553
1554 spin_lock(&sdp->sd_reclaim_lock);
1555 if (list_empty(&gl->gl_reclaim)) {
1556 gfs2_glock_hold(gl);
1557 list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
1558 atomic_inc(&sdp->sd_reclaim_count);
1559 }
1560 spin_unlock(&sdp->sd_reclaim_lock);
1561
1562 wake_up(&sdp->sd_reclaim_wq);
1563}
1564
1565/**
1566 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
1567 * @sdp: the filesystem
1568 *
1569 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
1570 * different glock and we notice that there are a lot of glocks in the
1571 * reclaim list.
1572 *
1573 */
1574
1575void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
1576{
1577 struct gfs2_glock *gl;
1578
1579 spin_lock(&sdp->sd_reclaim_lock);
1580 if (list_empty(&sdp->sd_reclaim_list)) {
1581 spin_unlock(&sdp->sd_reclaim_lock);
1582 return;
1583 }
1584 gl = list_entry(sdp->sd_reclaim_list.next,
1585 struct gfs2_glock, gl_reclaim);
1586 list_del_init(&gl->gl_reclaim);
1587 spin_unlock(&sdp->sd_reclaim_lock);
1588
1589 atomic_dec(&sdp->sd_reclaim_count);
1590 atomic_inc(&sdp->sd_reclaimed);
1591
1592 if (gfs2_glmutex_trylock(gl)) {
Steven Whitehouse12132932007-01-22 13:09:04 -05001593 if (list_empty(&gl->gl_holders) &&
Steven Whitehouse50299962006-09-04 09:49:55 -04001594 gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
David Teiglandb3b94fa2006-01-16 16:50:04 +00001595 handle_callback(gl, LM_ST_UNLOCKED);
1596 gfs2_glmutex_unlock(gl);
1597 }
1598
1599 gfs2_glock_put(gl);
1600}
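
/*
 * A sketch of how a reclaim thread might drain the list built up by
 * gfs2_glock_schedule_for_reclaim().  The real daemon lives in daemon.c and
 * also throttles and sleeps between passes; this loop is only illustrative
 * and assumes nothing beyond the functions defined above.  Not built.
 */
#if 0
static void example_drain_reclaim_list(struct gfs2_sbd *sdp)
{
	while (atomic_read(&sdp->sd_reclaim_count))
		gfs2_reclaim_glock(sdp);
}
#endif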
1601
1602/**
1603 * examine_bucket - Call a function for each glock in a hash bucket
1604 * @examiner: the function
1605 * @sdp: the filesystem
1606 * @hash: the index of the hash bucket to examine
1607 *
1608 * Returns: 1 if the bucket has entries
1609 */
1610
1611static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
Steven Whitehouse37b2fa62006-09-08 13:35:56 -04001612 unsigned int hash)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001613{
Steven Whitehouse24264432006-09-11 21:40:30 -04001614 struct gfs2_glock *gl, *prev = NULL;
1615 int has_entries = 0;
Steven Whitehouseb6397892006-09-12 10:10:01 -04001616 struct hlist_head *head = &gl_hash_table[hash].hb_list;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001617
Steven Whitehouse24264432006-09-11 21:40:30 -04001618 read_lock(gl_lock_addr(hash));
Steven Whitehouseb6397892006-09-12 10:10:01 -04001619 /* Can't use hlist_for_each_entry - don't want prefetch here */
1620 if (hlist_empty(head))
Steven Whitehouse24264432006-09-11 21:40:30 -04001621 goto out;
Steven Whitehouseb6397892006-09-12 10:10:01 -04001622 gl = list_entry(head->first, struct gfs2_glock, gl_list);
1623 while(1) {
Steven Whitehouse24264432006-09-11 21:40:30 -04001624 if (gl->gl_sbd == sdp) {
David Teiglandb3b94fa2006-01-16 16:50:04 +00001625 gfs2_glock_hold(gl);
Steven Whitehouse24264432006-09-11 21:40:30 -04001626 read_unlock(gl_lock_addr(hash));
1627 if (prev)
1628 gfs2_glock_put(prev);
1629 prev = gl;
1630 examiner(gl);
Steven Whitehousea8336342006-09-14 13:57:38 -04001631 has_entries = 1;
Steven Whitehouse24264432006-09-11 21:40:30 -04001632 read_lock(gl_lock_addr(hash));
David Teiglandb3b94fa2006-01-16 16:50:04 +00001633 }
Steven Whitehouseb6397892006-09-12 10:10:01 -04001634 if (gl->gl_list.next == NULL)
1635 break;
Steven Whitehouse24264432006-09-11 21:40:30 -04001636 gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001637 }
Steven Whitehouse24264432006-09-11 21:40:30 -04001638out:
1639 read_unlock(gl_lock_addr(hash));
1640 if (prev)
1641 gfs2_glock_put(prev);
1642 return has_entries;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001643}
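
/*
 * A hypothetical examiner, shown only to illustrate the contract enforced by
 * examine_bucket(): the callback runs with a reference held on the glock and
 * without the hash lock held, so it may sleep, but it must not drop that
 * reference itself (examine_bucket() calls gfs2_glock_put() for it).  Not
 * built; scan_glock() and clear_glock() below are the real examiners.
 */
#if 0
static atomic_t example_glock_count = ATOMIC_INIT(0);

static void example_count_glock(struct gfs2_glock *gl)
{
	atomic_inc(&example_glock_count);
}

static void example_count_all_glocks(struct gfs2_sbd *sdp)
{
	unsigned int x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(example_count_glock, sdp, x);
}
#endif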
1644
1645/**
1646 * scan_glock - look at a glock and see if we can reclaim it
1647 * @gl: the glock to look at
1648 *
1649 */
1650
1651static void scan_glock(struct gfs2_glock *gl)
1652{
Steven Whitehouseb0041572006-11-23 10:51:34 -05001653 if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
Steven Whitehouse24264432006-09-11 21:40:30 -04001654 return;
Steven Whitehousea2242db2006-08-24 17:03:05 -04001655
David Teiglandb3b94fa2006-01-16 16:50:04 +00001656 if (gfs2_glmutex_trylock(gl)) {
Steven Whitehouse12132932007-01-22 13:09:04 -05001657 if (list_empty(&gl->gl_holders) &&
Steven Whitehouse24264432006-09-11 21:40:30 -04001658 gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
David Teiglandb3b94fa2006-01-16 16:50:04 +00001659 goto out_schedule;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001660 gfs2_glmutex_unlock(gl);
1661 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00001662 return;
1663
Steven Whitehouse627add22006-07-05 13:16:19 -04001664out_schedule:
David Teiglandb3b94fa2006-01-16 16:50:04 +00001665 gfs2_glmutex_unlock(gl);
1666 gfs2_glock_schedule_for_reclaim(gl);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001667}
1668
1669/**
1670 * gfs2_scand_internal - Look for glocks and inodes to toss from memory
1671 * @sdp: the filesystem
1672 *
1673 */
1674
1675void gfs2_scand_internal(struct gfs2_sbd *sdp)
1676{
1677 unsigned int x;
1678
Steven Whitehouse94610612006-09-09 18:59:27 -04001679 for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
Steven Whitehouse37b2fa62006-09-08 13:35:56 -04001680 examine_bucket(scan_glock, sdp, x);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001681}
1682
1683/**
1684 * clear_glock - look at a glock and see if we can free it from the glock cache
1685 * @gl: the glock to look at
1686 *
1687 */
1688
1689static void clear_glock(struct gfs2_glock *gl)
1690{
1691 struct gfs2_sbd *sdp = gl->gl_sbd;
1692 int released;
1693
1694 spin_lock(&sdp->sd_reclaim_lock);
1695 if (!list_empty(&gl->gl_reclaim)) {
1696 list_del_init(&gl->gl_reclaim);
1697 atomic_dec(&sdp->sd_reclaim_count);
Steven Whitehouse190562b2006-04-20 16:57:23 -04001698 spin_unlock(&sdp->sd_reclaim_lock);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001699 released = gfs2_glock_put(gl);
1700 gfs2_assert(sdp, !released);
Steven Whitehouse190562b2006-04-20 16:57:23 -04001701 } else {
1702 spin_unlock(&sdp->sd_reclaim_lock);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001703 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00001704
1705 if (gfs2_glmutex_trylock(gl)) {
Steven Whitehouse90101c32007-01-23 13:20:41 -05001706 if (list_empty(&gl->gl_holders) &&
David Teiglandb3b94fa2006-01-16 16:50:04 +00001707 gl->gl_state != LM_ST_UNLOCKED)
1708 handle_callback(gl, LM_ST_UNLOCKED);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001709 gfs2_glmutex_unlock(gl);
1710 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00001711}
1712
1713/**
1714 * gfs2_gl_hash_clear - Empty out the glock hash table
1715 * @sdp: the filesystem
1716 * @wait: wait until it's all gone
1717 *
1718 * Called when unmounting the filesystem, or when the inter-node lock manager
1719 * requests DROPLOCKS because it is running out of capacity.
1720 */
1721
1722void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
1723{
1724 unsigned long t;
1725 unsigned int x;
1726 int cont;
1727
1728 t = jiffies;
1729
1730 for (;;) {
1731 cont = 0;
Steven Whitehouse24264432006-09-11 21:40:30 -04001732 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
Steven Whitehouse907b9bc2006-09-25 09:26:04 -04001733 if (examine_bucket(clear_glock, sdp, x))
David Teiglandb3b94fa2006-01-16 16:50:04 +00001734 cont = 1;
Steven Whitehouse24264432006-09-11 21:40:30 -04001735 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00001736
1737 if (!wait || !cont)
1738 break;
1739
1740 if (time_after_eq(jiffies,
1741 t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
1742 fs_warn(sdp, "Unmount seems to be stalled. "
1743 "Dumping lock state...\n");
1744 gfs2_dump_lockstate(sdp);
1745 t = jiffies;
1746 }
1747
Steven Whitehouse61be0842007-01-29 11:51:45 +00001748 down_write(&gfs2_umount_flush_sem);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001749 invalidate_inodes(sdp->sd_vfs);
Steven Whitehouse61be0842007-01-29 11:51:45 +00001750 up_write(&gfs2_umount_flush_sem);
Steven Whitehousefd88de562006-05-05 16:59:11 -04001751 msleep(10);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001752 }
1753}
1754
1755/*
1756 * Diagnostic routines to help debug distributed deadlock
1757 */
1758
1759/**
1760 * dump_holder - print information about a glock holder
 * @gi: the glock iterator, or NULL to print to the console
1761 * @str: a string naming the type of holder
1762 * @gh: the glock holder
1763 *
1764 * Returns: always 0
1765 */
1766
Robert Peterson7c52b162007-03-16 10:26:37 +00001767static int dump_holder(struct glock_iter *gi, char *str,
1768 struct gfs2_holder *gh)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001769{
1770 unsigned int x;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001771
Robert Peterson7c52b162007-03-16 10:26:37 +00001772 print_dbg(gi, " %s\n", str);
1773 print_dbg(gi, " owner = %ld\n",
David Teiglandb3b94fa2006-01-16 16:50:04 +00001774 (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
Robert Peterson7c52b162007-03-16 10:26:37 +00001775 print_dbg(gi, " gh_state = %u\n", gh->gh_state);
1776 print_dbg(gi, " gh_flags =");
David Teiglandb3b94fa2006-01-16 16:50:04 +00001777 for (x = 0; x < 32; x++)
1778 if (gh->gh_flags & (1 << x))
Robert Peterson7c52b162007-03-16 10:26:37 +00001779 print_dbg(gi, " %u", x);
1780 print_dbg(gi, " \n");
1781 print_dbg(gi, " error = %d\n", gh->gh_error);
1782 print_dbg(gi, " gh_iflags =");
David Teiglandb3b94fa2006-01-16 16:50:04 +00001783 for (x = 0; x < 32; x++)
1784 if (test_bit(x, &gh->gh_iflags))
Robert Peterson7c52b162007-03-16 10:26:37 +00001785 print_dbg(gi, " %u", x);
1786 print_dbg(gi, " \n");
1787 if (gi)
1788 print_dbg(gi, " initialized at: 0x%lx\n", gh->gh_ip);
1789 else
1790 print_symbol(KERN_INFO " initialized at: %s\n", gh->gh_ip);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001791
Robert Peterson7c52b162007-03-16 10:26:37 +00001792 return 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001793}
1794
1795/**
1796 * dump_inode - print information about an inode
 * @gi: the glock iterator, or NULL to print to the console
1797 * @ip: the inode
1798 *
1799 * Returns: always 0
1800 */
1801
Robert Peterson7c52b162007-03-16 10:26:37 +00001802static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001803{
1804 unsigned int x;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001805
Robert Peterson7c52b162007-03-16 10:26:37 +00001806 print_dbg(gi, " Inode:\n");
1807 print_dbg(gi, " num = %llu/%llu\n",
1808 ip->i_num.no_formal_ino, ip->i_num.no_addr);
1809 print_dbg(gi, " type = %u\n", IF2DT(ip->i_inode.i_mode));
1810 print_dbg(gi, " i_flags =");
David Teiglandb3b94fa2006-01-16 16:50:04 +00001811 for (x = 0; x < 32; x++)
1812 if (test_bit(x, &ip->i_flags))
Robert Peterson7c52b162007-03-16 10:26:37 +00001813 print_dbg(gi, " %u", x);
1814 print_dbg(gi, " \n");
1815 return 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001816}
1817
1818/**
1819 * dump_glock - print information about a glock
1820 * @gi: the glock iterator, or NULL to print to the console
1821 * @gl: the glock
1822 *
1823 * Returns: 0 on success, -ENOBUFS if the inode attached to the glock is busy
1824 */
1825
Robert Peterson7c52b162007-03-16 10:26:37 +00001826static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001827{
1828 struct gfs2_holder *gh;
1829 unsigned int x;
1830 int error = -ENOBUFS;
1831
1832 spin_lock(&gl->gl_spin);
1833
Robert Peterson7c52b162007-03-16 10:26:37 +00001834 print_dbg(gi, "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type,
1835 (unsigned long long)gl->gl_name.ln_number);
1836 print_dbg(gi, " gl_flags =");
Steven Whitehouse85d1da62006-09-07 14:40:21 -04001837 for (x = 0; x < 32; x++) {
David Teiglandb3b94fa2006-01-16 16:50:04 +00001838 if (test_bit(x, &gl->gl_flags))
Robert Peterson7c52b162007-03-16 10:26:37 +00001839 print_dbg(gi, " %u", x);
Steven Whitehouse85d1da62006-09-07 14:40:21 -04001840 }
Robert Peterson7c52b162007-03-16 10:26:37 +00001841 print_dbg(gi, " \n");
1842 print_dbg(gi, " gl_ref = %d\n", atomic_read(&gl->gl_ref));
1843 print_dbg(gi, " gl_state = %u\n", gl->gl_state);
1844 if (gl->gl_owner)
	print_dbg(gi, " gl_owner = %s\n", gl->gl_owner->comm);
else
	print_dbg(gi, " gl_owner = none\n");
1845 print_dbg(gi, " gl_ip = %lu\n", gl->gl_ip);
1846 print_dbg(gi, " req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
1847 print_dbg(gi, " req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
1848 print_dbg(gi, " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
1849 print_dbg(gi, " object = %s\n", (gl->gl_object) ? "yes" : "no");
1850 print_dbg(gi, " le = %s\n",
David Teiglandb3b94fa2006-01-16 16:50:04 +00001851 (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
Robert Peterson7c52b162007-03-16 10:26:37 +00001852 print_dbg(gi, " reclaim = %s\n",
1853 (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
David Teiglandb3b94fa2006-01-16 16:50:04 +00001854 if (gl->gl_aspace)
Robert Peterson7c52b162007-03-16 10:26:37 +00001855 print_dbg(gi, " aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
1856 gl->gl_aspace->i_mapping->nrpages);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001857 else
Robert Peterson7c52b162007-03-16 10:26:37 +00001858 print_dbg(gi, " aspace = no\n");
1859 print_dbg(gi, " ail = %d\n", atomic_read(&gl->gl_ail_count));
David Teiglandb3b94fa2006-01-16 16:50:04 +00001860 if (gl->gl_req_gh) {
Robert Peterson7c52b162007-03-16 10:26:37 +00001861 error = dump_holder(gi, "Request", gl->gl_req_gh);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001862 if (error)
1863 goto out;
1864 }
1865 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
Robert Peterson7c52b162007-03-16 10:26:37 +00001866 error = dump_holder(gi, "Holder", gh);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001867 if (error)
1868 goto out;
1869 }
1870 list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
Robert Peterson7c52b162007-03-16 10:26:37 +00001871 error = dump_holder(gi, "Waiter1", gh);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001872 if (error)
1873 goto out;
1874 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00001875 list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
Robert Peterson7c52b162007-03-16 10:26:37 +00001876 error = dump_holder(gi, "Waiter3", gh);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001877 if (error)
1878 goto out;
1879 }
Steven Whitehouse3b8249f2007-03-16 09:40:31 +00001880 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
1881 print_dbg(gi, " Demotion req to state %u (%llu uS ago)\n",
1882 gl->gl_demote_state,
Steven Whitehouse420d2a12007-03-18 16:05:27 +00001883 (u64)(jiffies - gl->gl_demote_time)*(1000000/HZ));
Steven Whitehouse3b8249f2007-03-16 09:40:31 +00001884 }
Steven Whitehouse5c676f62006-02-27 17:23:27 -05001885 if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
David Teiglandb3b94fa2006-01-16 16:50:04 +00001886 if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
Robert Peterson7c52b162007-03-16 10:26:37 +00001887 list_empty(&gl->gl_holders)) {
1888 error = dump_inode(gi, gl->gl_object);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001889 if (error)
1890 goto out;
1891 } else {
1892 error = -ENOBUFS;
Robert Peterson7c52b162007-03-16 10:26:37 +00001893 print_dbg(gi, " Inode: busy\n");
David Teiglandb3b94fa2006-01-16 16:50:04 +00001894 }
1895 }
1896
1897 error = 0;
1898
Steven Whitehousea91ea692006-09-04 12:04:26 -04001899out:
David Teiglandb3b94fa2006-01-16 16:50:04 +00001900 spin_unlock(&gl->gl_spin);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001901 return error;
1902}
1903
1904/**
1905 * gfs2_dump_lockstate - print out the current lockstate
1906 * @sdp: the filesystem
1907 *
1908 * Dumps the state of every glock belonging to @sdp to the console.
1910 *
1911 */
1912
Adrian Bunk08bc2db2006-04-28 10:59:12 -04001913static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001914{
David Teiglandb3b94fa2006-01-16 16:50:04 +00001915 struct gfs2_glock *gl;
Steven Whitehouseb6397892006-09-12 10:10:01 -04001916 struct hlist_node *h;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001917 unsigned int x;
1918 int error = 0;
1919
1920 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
David Teiglandb3b94fa2006-01-16 16:50:04 +00001921
Steven Whitehouse087efdd2006-09-09 16:59:11 -04001922 read_lock(gl_lock_addr(x));
David Teiglandb3b94fa2006-01-16 16:50:04 +00001923
Steven Whitehouseb6397892006-09-12 10:10:01 -04001924 hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
Steven Whitehouse85d1da62006-09-07 14:40:21 -04001925 if (gl->gl_sbd != sdp)
1926 continue;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001927
Robert Peterson7c52b162007-03-16 10:26:37 +00001928 error = dump_glock(NULL, gl);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001929 if (error)
1930 break;
1931 }
1932
Steven Whitehouse087efdd2006-09-09 16:59:11 -04001933 read_unlock(gl_lock_addr(x));
David Teiglandb3b94fa2006-01-16 16:50:04 +00001934
1935 if (error)
1936 break;
1937 }
1938
1939
1940 return error;
1941}
1942
Steven Whitehouse85d1da62006-09-07 14:40:21 -04001943int __init gfs2_glock_init(void)
1944{
1945 unsigned i;
1946 for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
Steven Whitehouseb6397892006-09-12 10:10:01 -04001947 INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
Steven Whitehouse85d1da62006-09-07 14:40:21 -04001948 }
Steven Whitehouse087efdd2006-09-09 16:59:11 -04001949#ifdef GL_HASH_LOCK_SZ
1950 for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
1951 rwlock_init(&gl_hash_locks[i]);
1952 }
1953#endif
Steven Whitehouse85d1da62006-09-07 14:40:21 -04001954 return 0;
1955}
1956
Robert Peterson7c52b162007-03-16 10:26:37 +00001957static int gfs2_glock_iter_next(struct glock_iter *gi)
1958{
1959 while (1) {
1960 if (!gi->hb_list) { /* If we don't have a hash bucket yet */
1961 gi->hb_list = &gl_hash_table[gi->hash].hb_list;
1962 if (hlist_empty(gi->hb_list)) {
1963 gi->hash++;
1964 gi->hb_list = NULL;
1965 if (gi->hash >= GFS2_GL_HASH_SIZE)
1966 return 1;
1967 else
1968 continue;
1969 }
			gi->gl = list_entry(gi->hb_list->first,
					    struct gfs2_glock, gl_list);
1975 } else {
1976 if (gi->gl->gl_list.next == NULL) {
1977 gi->hash++;
1978 gi->hb_list = NULL;
1979 continue;
1980 }
1981 gi->gl = list_entry(gi->gl->gl_list.next,
1982 struct gfs2_glock, gl_list);
1983 }
1984 if (gi->gl)
1985 break;
1986 }
1987 return 0;
1988}
1989
1990static void gfs2_glock_iter_free(struct glock_iter *gi)
1991{
1992 kfree(gi);
1993}
1994
1995static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp)
1996{
1997 struct glock_iter *gi;
1998
1999 gi = kmalloc(sizeof (*gi), GFP_KERNEL);
2000 if (!gi)
2001 return NULL;
2002
2003 gi->sdp = sdp;
2004 gi->hash = 0;
2005 gi->gl = NULL;
2006 gi->hb_list = NULL;
2007 gi->seq = NULL;
2008 memset(gi->string, 0, sizeof(gi->string));
2009
2010 if (gfs2_glock_iter_next(gi)) {
2011 gfs2_glock_iter_free(gi);
2012 return NULL;
2013 }
2014
2015 return gi;
2016}
2017
2018static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos)
2019{
2020 struct glock_iter *gi;
2021 loff_t n = *pos;
2022
2023 gi = gfs2_glock_iter_init(file->private);
2024 if (!gi)
2025 return NULL;
2026
2027 while (n--) {
2028 if (gfs2_glock_iter_next(gi)) {
2029 gfs2_glock_iter_free(gi);
2030 return NULL;
2031 }
2032 }
2033
2034 return gi;
2035}
2036
2037static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr,
2038 loff_t *pos)
2039{
2040 struct glock_iter *gi = iter_ptr;
2041
2042 (*pos)++;
2043
2044 if (gfs2_glock_iter_next(gi)) {
2045 gfs2_glock_iter_free(gi);
2046 return NULL;
2047 }
2048
2049 return gi;
2050}
2051
2052static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr)
2053{
2054 /* nothing for now */
2055}
2056
2057static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr)
2058{
2059 struct glock_iter *gi = iter_ptr;
2060
2061 gi->seq = file;
2062 dump_glock(gi, gi->gl);
2063
2064 return 0;
2065}
2066
2067static struct seq_operations gfs2_glock_seq_ops = {
2068 .start = gfs2_glock_seq_start,
2069 .next = gfs2_glock_seq_next,
2070 .stop = gfs2_glock_seq_stop,
2071 .show = gfs2_glock_seq_show,
2072};
2073
2074static int gfs2_debugfs_open(struct inode *inode, struct file *file)
2075{
2076 struct seq_file *seq;
2077 int ret;
2078
2079 ret = seq_open(file, &gfs2_glock_seq_ops);
2080 if (ret)
2081 return ret;
2082
2083 seq = file->private_data;
2084 seq->private = inode->i_private;
2085
2086 return 0;
2087}
2088
2089static const struct file_operations gfs2_debug_fops = {
2090 .owner = THIS_MODULE,
2091 .open = gfs2_debugfs_open,
2092 .read = seq_read,
2093 .llseek = seq_lseek,
2094 .release = seq_release
2095};
2096
2097int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
2098{
2099 sdp->debugfs_dentry = debugfs_create_file(sdp->sd_table_name,
2100 S_IFREG | S_IRUGO,
2101 gfs2_root, sdp,
2102 &gfs2_debug_fops);
2103 if (!sdp->debugfs_dentry)
2104 return -ENOMEM;
2105
2106 return 0;
2107}
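
/*
 * With debugfs mounted in the usual place, the glock state of a mounted
 * filesystem can then be read from user space, for example:
 *
 *	mount -t debugfs none /sys/kernel/debug
 *	cat /sys/kernel/debug/gfs2/<locktable name>
 *
 * Each glock in the output is produced by gfs2_glock_seq_show() above via
 * dump_glock().
 */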
2108
2109void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2110{
Josef Whiter5c7342d2007-03-07 17:09:10 -05002111 if (sdp && sdp->debugfs_dentry)
Robert Peterson7c52b162007-03-16 10:26:37 +00002112 debugfs_remove(sdp->debugfs_dentry);
2113}
2114
2115int gfs2_register_debugfs(void)
2116{
2117 gfs2_root = debugfs_create_dir("gfs2", NULL);
2118 return gfs2_root ? 0 : -ENOMEM;
2119}
2120
2121void gfs2_unregister_debugfs(void)
2122{
2123 debugfs_remove(gfs2_root);
2124}