blob: 871ffc9578f2a8549954ed802e05ae4a3e6a8d7c [file] [log] [blame]
David Teigland869d81d2006-01-17 08:47:12 +00001/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
Steven Whitehousee9fc2aa2006-09-01 11:05:15 -04007 * of the GNU General Public License version 2.
David Teigland869d81d2006-01-17 08:47:12 +00008 */
David Teigland29b79982006-01-16 16:52:38 +00009
10#include "lock_dlm.h"
11
/* Scratch lock value block handed to internal NL "hold_null" locks
   (see hold_null_lock()) so the dlm has somewhere to write lvb data
   that lock_dlm does not care about. */
static char junk_lvb[GDLM_LVB_SIZE];
13
Steven Whitehousef3c9d382008-05-21 17:21:42 +010014
15/* convert dlm lock-mode to gfs lock-state */
16
17static s16 gdlm_make_lmstate(s16 dlmmode)
18{
19 switch (dlmmode) {
20 case DLM_LOCK_IV:
21 case DLM_LOCK_NL:
22 return LM_ST_UNLOCKED;
23 case DLM_LOCK_EX:
24 return LM_ST_EXCLUSIVE;
25 case DLM_LOCK_CW:
26 return LM_ST_DEFERRED;
27 case DLM_LOCK_PR:
28 return LM_ST_SHARED;
29 }
30 gdlm_assert(0, "unknown DLM mode %d", dlmmode);
31 return -1;
32}
33
34/* A lock placed on this queue is re-submitted to DLM as soon as the lock_dlm
35 thread gets to it. */
36
37static void queue_submit(struct gdlm_lock *lp)
David Teigland29b79982006-01-16 16:52:38 +000038{
39 struct gdlm_ls *ls = lp->ls;
40
David Teigland29b79982006-01-16 16:52:38 +000041 spin_lock(&ls->async_lock);
Steven Whitehousef3c9d382008-05-21 17:21:42 +010042 list_add_tail(&lp->delay_list, &ls->submit);
David Teigland29b79982006-01-16 16:52:38 +000043 spin_unlock(&ls->async_lock);
44 wake_up(&ls->thread_wait);
45}
46
/* Clear LFL_AST_WAIT and wake anyone sleeping on that bit (see the
   wait_on_bit() in hold_null_lock()).  The memory barrier between
   clearing the bit and the wake is required so the waiter cannot
   observe the wakeup without also observing the cleared flag. */
static void wake_up_ast(struct gdlm_lock *lp)
{
	clear_bit(LFL_AST_WAIT, &lp->flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&lp->flags, LFL_AST_WAIT);
}
53
/* Unlink @lp from any pending submit/delayed queue and from the
   lockspace's all_locks list, then free it.  The caller must hold the
   last reference; @lp is invalid on return. */
static void gdlm_delete_lp(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;

	spin_lock(&ls->async_lock);
	/* The lock may still be queued for (re)submission; unlink first. */
	if (!list_empty(&lp->delay_list))
		list_del_init(&lp->delay_list);
	/* Every live lock must be on all_locks; a miss indicates corruption. */
	gdlm_assert(!list_empty(&lp->all_list), "%x,%llx", lp->lockname.ln_type,
		    (unsigned long long)lp->lockname.ln_number);
	list_del_init(&lp->all_list);
	ls->all_locks_count--;
	spin_unlock(&ls->async_lock);

	kfree(lp);
}
69
70static void gdlm_queue_delayed(struct gdlm_lock *lp)
71{
72 struct gdlm_ls *ls = lp->ls;
73
74 spin_lock(&ls->async_lock);
75 list_add_tail(&lp->delay_list, &ls->delayed);
76 spin_unlock(&ls->async_lock);
77}
78
/* Handle a completion ast for @lp.  Inspects the dlm status block
   (lp->lksb) and the lock's flags and either: finishes the request and
   calls back into GFS via ls->fscb, re-submits the lock (recovery /
   re-request cases), or wakes a synchronous waiter via wake_up_ast().
   Runs in lock_dlm thread context. */
static void process_complete(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;
	struct lm_async_cb acb;
	s16 prev_mode = lp->cur;	/* mode before this request completed */

	memset(&acb, 0, sizeof(acb));

	/* A cancel completed in the dlm: the lock stays in its prior mode. */
	if (lp->lksb.sb_status == -DLM_ECANCEL) {
		log_info("complete dlm cancel %x,%llx flags %lx",
			 lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number,
			 lp->flags);

		lp->req = lp->cur;
		acb.lc_ret |= LM_OUT_CANCELED;
		if (lp->cur == DLM_LOCK_IV)
			lp->lksb.sb_lkid = 0;
		goto out;
	}

	/* Completion of an unlock we issued via gdlm_do_unlock(). */
	if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) {
		if (lp->lksb.sb_status != -DLM_EUNLOCK) {
			log_info("unlock sb_status %d %x,%llx flags %lx",
				 lp->lksb.sb_status, lp->lockname.ln_type,
				 (unsigned long long)lp->lockname.ln_number,
				 lp->flags);
			return;
		}

		lp->cur = DLM_LOCK_IV;
		lp->req = DLM_LOCK_IV;
		lp->lksb.sb_lkid = 0;

		/* unhold_null_lock() set this: free the lock after unlock. */
		if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) {
			gdlm_delete_lp(lp);
			return;
		}
		goto out;
	}

	/* dlm says the lvb contents are not valid: present zeros to GFS. */
	if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID)
		memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);

	/* dlm granted the ALTPR/ALTCW alternate mode (LM_FLAG_ANY):
	   record the mode that was actually granted. */
	if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) {
		if (lp->req == DLM_LOCK_PR)
			lp->req = DLM_LOCK_CW;
		else if (lp->req == DLM_LOCK_CW)
			lp->req = DLM_LOCK_PR;
	}

	/*
	 * A canceled lock request.  The lock was just taken off the delayed
	 * list and was never even submitted to dlm.
	 */

	if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) {
		log_info("complete internal cancel %x,%llx",
			 lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number);
		lp->req = lp->cur;
		acb.lc_ret |= LM_OUT_CANCELED;
		goto out;
	}

	/*
	 * An error occured.
	 */

	if (lp->lksb.sb_status) {
		/* a "normal" error */
		if ((lp->lksb.sb_status == -EAGAIN) &&
		    (lp->lkf & DLM_LKF_NOQUEUE)) {
			lp->req = lp->cur;
			if (lp->cur == DLM_LOCK_IV)
				lp->lksb.sb_lkid = 0;
			goto out;
		}

		/* this could only happen with cancels I think */
		log_info("ast sb_status %d %x,%llx flags %lx",
			 lp->lksb.sb_status, lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number,
			 lp->flags);
		/* Conversion deadlock with NODROP: report it to GFS instead
		   of having the dlm demote the lock. */
		if (lp->lksb.sb_status == -EDEADLOCK &&
		    lp->ls->fsflags & LM_MFLAG_CONV_NODROP) {
			lp->req = lp->cur;
			acb.lc_ret |= LM_OUT_CONV_DEADLK;
			if (lp->cur == DLM_LOCK_IV)
				lp->lksb.sb_lkid = 0;
			goto out;
		} else
			return;
	}

	/*
	 * This is an AST for an EX->EX conversion for sync_lvb from GFS.
	 */

	if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) {
		wake_up_ast(lp);
		return;
	}

	/*
	 * A lock has been demoted to NL because it initially completed during
	 * BLOCK_LOCKS.  Now it must be requested in the originally requested
	 * mode.
	 */

	if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) {
		gdlm_assert(lp->req == DLM_LOCK_NL, "%x,%llx",
			    lp->lockname.ln_type,
			    (unsigned long long)lp->lockname.ln_number);
		gdlm_assert(lp->prev_req > DLM_LOCK_NL, "%x,%llx",
			    lp->lockname.ln_type,
			    (unsigned long long)lp->lockname.ln_number);

		lp->cur = DLM_LOCK_NL;
		lp->req = lp->prev_req;
		lp->prev_req = DLM_LOCK_IV;
		lp->lkf &= ~DLM_LKF_CONVDEADLK;

		set_bit(LFL_NOCACHE, &lp->flags);

		if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
		    !test_bit(LFL_NOBLOCK, &lp->flags))
			gdlm_queue_delayed(lp);
		else
			queue_submit(lp);
		return;
	}

	/*
	 * A request is granted during dlm recovery.  It may be granted
	 * because the locks of a failed node were cleared.  In that case,
	 * there may be inconsistent data beneath this lock and we must wait
	 * for recovery to complete to use it.  When gfs recovery is done this
	 * granted lock will be converted to NL and then reacquired in this
	 * granted state.
	 */

	if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
	    !test_bit(LFL_NOBLOCK, &lp->flags) &&
	    lp->req != DLM_LOCK_NL) {

		lp->cur = lp->req;
		lp->prev_req = lp->req;
		lp->req = DLM_LOCK_NL;
		lp->lkf |= DLM_LKF_CONVERT;
		lp->lkf &= ~DLM_LKF_CONVDEADLK;

		log_debug("rereq %x,%llx id %x %d,%d",
			  lp->lockname.ln_type,
			  (unsigned long long)lp->lockname.ln_number,
			  lp->lksb.sb_lkid, lp->cur, lp->req);

		set_bit(LFL_REREQUEST, &lp->flags);
		queue_submit(lp);
		return;
	}

	/*
	 * DLM demoted the lock to NL before it was granted so GFS must be
	 * told it cannot cache data for this lock.
	 */

	if (lp->lksb.sb_flags & DLM_SBF_DEMOTED)
		set_bit(LFL_NOCACHE, &lp->flags);

out:
	/*
	 * This is an internal lock_dlm lock
	 */

	if (test_bit(LFL_INLOCK, &lp->flags)) {
		clear_bit(LFL_NOBLOCK, &lp->flags);
		lp->cur = lp->req;
		wake_up_ast(lp);
		return;
	}

	/*
	 * Normal completion of a lock request.  Tell GFS it now has the lock.
	 */

	clear_bit(LFL_NOBLOCK, &lp->flags);
	lp->cur = lp->req;

	acb.lc_name = lp->lockname;
	acb.lc_ret |= gdlm_make_lmstate(lp->cur);

	/* Caching is allowed only when both old and new modes held data
	   (> NL) and nothing flagged the lock NOCACHE. */
	if (!test_and_clear_bit(LFL_NOCACHE, &lp->flags) &&
	    (lp->cur > DLM_LOCK_NL) && (prev_mode > DLM_LOCK_NL))
		acb.lc_ret |= LM_OUT_CACHEABLE;

	ls->fscb(ls->sdp, LM_CB_ASYNC, &acb);
}
277
278static void gdlm_ast(void *astarg)
David Teigland29b79982006-01-16 16:52:38 +0000279{
280 struct gdlm_lock *lp = astarg;
Steven Whitehousef3c9d382008-05-21 17:21:42 +0100281 clear_bit(LFL_ACTIVE, &lp->flags);
282 process_complete(lp);
283}
284
285static void process_blocking(struct gdlm_lock *lp, int bast_mode)
286{
David Teigland29b79982006-01-16 16:52:38 +0000287 struct gdlm_ls *ls = lp->ls;
Steven Whitehousef3c9d382008-05-21 17:21:42 +0100288 unsigned int cb = 0;
289
290 switch (gdlm_make_lmstate(bast_mode)) {
291 case LM_ST_EXCLUSIVE:
292 cb = LM_CB_NEED_E;
293 break;
294 case LM_ST_DEFERRED:
295 cb = LM_CB_NEED_D;
296 break;
297 case LM_ST_SHARED:
298 cb = LM_CB_NEED_S;
299 break;
300 default:
301 gdlm_assert(0, "unknown bast mode %u", bast_mode);
302 }
303
304 ls->fscb(ls->sdp, cb, &lp->lockname);
305}
306
307
308static void gdlm_bast(void *astarg, int mode)
309{
310 struct gdlm_lock *lp = astarg;
David Teigland29b79982006-01-16 16:52:38 +0000311
312 if (!mode) {
Steven Whitehoused92a8d42006-02-27 10:57:14 -0500313 printk(KERN_INFO "lock_dlm: bast mode zero %x,%llx\n",
David Teigland9229f012006-05-24 09:21:30 -0400314 lp->lockname.ln_type,
315 (unsigned long long)lp->lockname.ln_number);
David Teigland29b79982006-01-16 16:52:38 +0000316 return;
317 }
318
Steven Whitehousef3c9d382008-05-21 17:21:42 +0100319 process_blocking(lp, mode);
David Teigland29b79982006-01-16 16:52:38 +0000320}
321
322/* convert gfs lock-state to dlm lock-mode */
323
Steven Whitehousecd915492006-09-04 12:49:07 -0400324static s16 make_mode(s16 lmstate)
David Teigland29b79982006-01-16 16:52:38 +0000325{
326 switch (lmstate) {
327 case LM_ST_UNLOCKED:
328 return DLM_LOCK_NL;
329 case LM_ST_EXCLUSIVE:
330 return DLM_LOCK_EX;
331 case LM_ST_DEFERRED:
332 return DLM_LOCK_CW;
333 case LM_ST_SHARED:
334 return DLM_LOCK_PR;
David Teigland29b79982006-01-16 16:52:38 +0000335 }
David Teigland869d81d2006-01-17 08:47:12 +0000336 gdlm_assert(0, "unknown LM state %d", lmstate);
337 return -1;
David Teigland29b79982006-01-16 16:52:38 +0000338}
339
David Teigland29b79982006-01-16 16:52:38 +0000340
341/* verify agreement with GFS on the current lock state, NB: DLM_LOCK_NL and
342 DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */
343
344static void check_cur_state(struct gdlm_lock *lp, unsigned int cur_state)
345{
Steven Whitehousecd915492006-09-04 12:49:07 -0400346 s16 cur = make_mode(cur_state);
David Teigland29b79982006-01-16 16:52:38 +0000347 if (lp->cur != DLM_LOCK_IV)
David Teigland869d81d2006-01-17 08:47:12 +0000348 gdlm_assert(lp->cur == cur, "%d, %d", lp->cur, cur);
David Teigland29b79982006-01-16 16:52:38 +0000349}
350
351static inline unsigned int make_flags(struct gdlm_lock *lp,
352 unsigned int gfs_flags,
Steven Whitehousecd915492006-09-04 12:49:07 -0400353 s16 cur, s16 req)
David Teigland29b79982006-01-16 16:52:38 +0000354{
355 unsigned int lkf = 0;
356
357 if (gfs_flags & LM_FLAG_TRY)
358 lkf |= DLM_LKF_NOQUEUE;
359
360 if (gfs_flags & LM_FLAG_TRY_1CB) {
361 lkf |= DLM_LKF_NOQUEUE;
362 lkf |= DLM_LKF_NOQUEUEBAST;
363 }
364
365 if (gfs_flags & LM_FLAG_PRIORITY) {
366 lkf |= DLM_LKF_NOORDER;
367 lkf |= DLM_LKF_HEADQUE;
368 }
369
370 if (gfs_flags & LM_FLAG_ANY) {
371 if (req == DLM_LOCK_PR)
372 lkf |= DLM_LKF_ALTCW;
373 else if (req == DLM_LOCK_CW)
374 lkf |= DLM_LKF_ALTPR;
375 }
376
377 if (lp->lksb.sb_lkid != 0) {
378 lkf |= DLM_LKF_CONVERT;
379
380 /* Conversion deadlock avoidance by DLM */
381
Benjamin Marzinski58e9fee2008-03-14 13:52:52 -0500382 if (!(lp->ls->fsflags & LM_MFLAG_CONV_NODROP) &&
383 !test_bit(LFL_FORCE_PROMOTE, &lp->flags) &&
David Teigland29b79982006-01-16 16:52:38 +0000384 !(lkf & DLM_LKF_NOQUEUE) &&
385 cur > DLM_LOCK_NL && req > DLM_LOCK_NL && cur != req)
386 lkf |= DLM_LKF_CONVDEADLK;
387 }
388
389 if (lp->lvb)
390 lkf |= DLM_LKF_VALBLK;
391
392 return lkf;
393}
394
395/* make_strname - convert GFS lock numbers to a string */
396
/* make_strname - convert GFS lock numbers to a string */
/* Renders ln_type in 8 chars and ln_number in 16 chars (space-padded
   hex), giving a fixed 24-character resource name; str->name must be at
   least GDLM_STRNAME_BYTES long to hold it plus the terminating NUL. */
static inline void make_strname(const struct lm_lockname *lockname,
				struct gdlm_strname *str)
{
	sprintf(str->name, "%8x%16llx", lockname->ln_type,
		(unsigned long long)lockname->ln_number);
	str->namelen = GDLM_STRNAME_BYTES;
}
404
Adrian Bunk08bc2db2006-04-28 10:59:12 -0400405static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
406 struct gdlm_lock **lpp)
David Teigland29b79982006-01-16 16:52:38 +0000407{
408 struct gdlm_lock *lp;
409
Josef Bacik16c5f062008-04-09 09:33:41 -0400410 lp = kzalloc(sizeof(struct gdlm_lock), GFP_NOFS);
David Teigland29b79982006-01-16 16:52:38 +0000411 if (!lp)
412 return -ENOMEM;
413
David Teigland29b79982006-01-16 16:52:38 +0000414 lp->lockname = *name;
Steven Whitehousef35ac342007-03-18 17:04:15 +0000415 make_strname(name, &lp->strname);
David Teigland29b79982006-01-16 16:52:38 +0000416 lp->ls = ls;
417 lp->cur = DLM_LOCK_IV;
David Teigland29b79982006-01-16 16:52:38 +0000418 INIT_LIST_HEAD(&lp->delay_list);
419
420 spin_lock(&ls->async_lock);
421 list_add(&lp->all_list, &ls->all_locks);
422 ls->all_locks_count++;
423 spin_unlock(&ls->async_lock);
424
425 *lpp = lp;
426 return 0;
427}
428
Steven Whitehouse9b47c112006-09-08 10:17:58 -0400429int gdlm_get_lock(void *lockspace, struct lm_lockname *name,
430 void **lockp)
David Teigland29b79982006-01-16 16:52:38 +0000431{
432 struct gdlm_lock *lp;
433 int error;
434
Steven Whitehouse9b47c112006-09-08 10:17:58 -0400435 error = gdlm_create_lp(lockspace, name, &lp);
David Teigland29b79982006-01-16 16:52:38 +0000436
Steven Whitehouse9b47c112006-09-08 10:17:58 -0400437 *lockp = lp;
David Teigland29b79982006-01-16 16:52:38 +0000438 return error;
439}
440
/* lock_module interface: release a lock object obtained from
   gdlm_get_lock(); the underlying gdlm_lock is freed. */
void gdlm_put_lock(void *lock)
{
	struct gdlm_lock *lp = lock;

	gdlm_delete_lp(lp);
}
445
/* Submit the request described by lp->req/lp->lkf to the dlm, or delay
   it if recovery is blocking new requests.  Returns LM_OUT_ASYNC when
   the result will arrive via gdlm_ast(), or LM_OUT_ERROR on a
   synchronous dlm_lock() failure. */
unsigned int gdlm_do_lock(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;
	int error, bast = 1;

	/*
	 * When recovery is in progress, delay lock requests for submission
	 * once recovery is done.  Requests for recovery (NOEXP) and unlocks
	 * can pass.
	 */

	if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
	    !test_bit(LFL_NOBLOCK, &lp->flags) && lp->req != DLM_LOCK_NL) {
		gdlm_queue_delayed(lp);
		return LM_OUT_ASYNC;
	}

	/*
	 * Submit the actual lock request.
	 */

	if (test_bit(LFL_NOBAST, &lp->flags))
		bast = 0;

	set_bit(LFL_ACTIVE, &lp->flags);

	log_debug("lk %x,%llx id %x %d,%d %x", lp->lockname.ln_type,
		  (unsigned long long)lp->lockname.ln_number, lp->lksb.sb_lkid,
		  lp->cur, lp->req, lp->lkf);

	error = dlm_lock(ls->dlm_lockspace, lp->req, &lp->lksb, lp->lkf,
			 lp->strname.name, lp->strname.namelen, 0, gdlm_ast,
			 lp, bast ? gdlm_bast : NULL);

	/* A NOQUEUE request refused synchronously: fake the completion ast
	   so the -EAGAIN is reported through the normal async path. */
	if ((error == -EAGAIN) && (lp->lkf & DLM_LKF_NOQUEUE)) {
		lp->lksb.sb_status = -EAGAIN;
		gdlm_ast(lp);
		error = 0;
	}

	if (error) {
		log_error("%s: gdlm_lock %x,%llx err=%d cur=%d req=%d lkf=%x "
			  "flags=%lx", ls->fsname, lp->lockname.ln_type,
			  (unsigned long long)lp->lockname.ln_number, error,
			  lp->cur, lp->req, lp->lkf, lp->flags);
		return LM_OUT_ERROR;
	}
	return LM_OUT_ASYNC;
}
495
/* Issue a dlm unlock for @lp.  LFL_DLM_UNLOCK is set so
   process_complete() recognises the resulting ast as an unlock
   completion.  Returns LM_OUT_ASYNC (result delivered via gdlm_ast) or
   LM_OUT_ERROR on a synchronous dlm_unlock() failure. */
static unsigned int gdlm_do_unlock(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;
	unsigned int lkf = 0;
	int error;

	set_bit(LFL_DLM_UNLOCK, &lp->flags);
	set_bit(LFL_ACTIVE, &lp->flags);

	/* Write the lock value block back on unlock if we carry one. */
	if (lp->lvb)
		lkf = DLM_LKF_VALBLK;

	log_debug("un %x,%llx %x %d %x", lp->lockname.ln_type,
		  (unsigned long long)lp->lockname.ln_number,
		  lp->lksb.sb_lkid, lp->cur, lkf);

	error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, lkf, NULL, lp);

	if (error) {
		log_error("%s: gdlm_unlock %x,%llx err=%d cur=%d req=%d lkf=%x "
			  "flags=%lx", ls->fsname, lp->lockname.ln_type,
			  (unsigned long long)lp->lockname.ln_number, error,
			  lp->cur, lp->req, lp->lkf, lp->flags);
		return LM_OUT_ERROR;
	}
	return LM_OUT_ASYNC;
}
523
Steven Whitehouse9b47c112006-09-08 10:17:58 -0400524unsigned int gdlm_lock(void *lock, unsigned int cur_state,
David Teigland29b79982006-01-16 16:52:38 +0000525 unsigned int req_state, unsigned int flags)
526{
Steven Whitehouse9b47c112006-09-08 10:17:58 -0400527 struct gdlm_lock *lp = lock;
David Teigland29b79982006-01-16 16:52:38 +0000528
Steven Whitehouse6802e342008-05-21 17:03:22 +0100529 if (req_state == LM_ST_UNLOCKED)
530 return gdlm_unlock(lock, cur_state);
531
Steven Whitehousef3c9d382008-05-21 17:21:42 +0100532 if (req_state == LM_ST_UNLOCKED)
533 return gdlm_unlock(lock, cur_state);
534
David Teigland29b79982006-01-16 16:52:38 +0000535 clear_bit(LFL_DLM_CANCEL, &lp->flags);
536 if (flags & LM_FLAG_NOEXP)
537 set_bit(LFL_NOBLOCK, &lp->flags);
538
539 check_cur_state(lp, cur_state);
540 lp->req = make_mode(req_state);
541 lp->lkf = make_flags(lp, flags, lp->cur, lp->req);
542
David Teigland8d3b35a2006-02-23 10:00:56 +0000543 return gdlm_do_lock(lp);
David Teigland29b79982006-01-16 16:52:38 +0000544}
545
Steven Whitehouse9b47c112006-09-08 10:17:58 -0400546unsigned int gdlm_unlock(void *lock, unsigned int cur_state)
David Teigland29b79982006-01-16 16:52:38 +0000547{
Steven Whitehouse9b47c112006-09-08 10:17:58 -0400548 struct gdlm_lock *lp = lock;
David Teigland29b79982006-01-16 16:52:38 +0000549
550 clear_bit(LFL_DLM_CANCEL, &lp->flags);
551 if (lp->cur == DLM_LOCK_IV)
552 return 0;
David Teigland869d81d2006-01-17 08:47:12 +0000553 return gdlm_do_unlock(lp);
David Teigland29b79982006-01-16 16:52:38 +0000554}
555
/* lock_module interface: try to cancel the outstanding request on
   @lock.  Three cases: the request is still on our delayed list (cancel
   it internally without involving the dlm), it is not cancelable
   (inactive or already unlocking), or it is blocked inside the dlm
   (issue DLM_LKF_CANCEL). */
void gdlm_cancel(void *lock)
{
	struct gdlm_lock *lp = lock;
	struct gdlm_ls *ls = lp->ls;
	int error, delay_list = 0;

	/* A dlm cancel is already outstanding for this lock. */
	if (test_bit(LFL_DLM_CANCEL, &lp->flags))
		return;

	log_info("gdlm_cancel %x,%llx flags %lx", lp->lockname.ln_type,
		 (unsigned long long)lp->lockname.ln_number, lp->flags);

	spin_lock(&ls->async_lock);
	if (!list_empty(&lp->delay_list)) {
		list_del_init(&lp->delay_list);
		delay_list = 1;
	}
	spin_unlock(&ls->async_lock);

	/* Request never reached the dlm: complete the cancel ourselves by
	   faking the ast (process_complete handles LFL_CANCEL). */
	if (delay_list) {
		set_bit(LFL_CANCEL, &lp->flags);
		set_bit(LFL_ACTIVE, &lp->flags);
		gdlm_ast(lp);
		return;
	}

	if (!test_bit(LFL_ACTIVE, &lp->flags) ||
	    test_bit(LFL_DLM_UNLOCK, &lp->flags)) {
		log_info("gdlm_cancel skip %x,%llx flags %lx",
			 lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number, lp->flags);
		return;
	}

	/* the lock is blocked in the dlm */

	set_bit(LFL_DLM_CANCEL, &lp->flags);
	set_bit(LFL_ACTIVE, &lp->flags);

	error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, DLM_LKF_CANCEL,
			   NULL, lp);

	log_info("gdlm_cancel rv %d %x,%llx flags %lx", error,
		 lp->lockname.ln_type,
		 (unsigned long long)lp->lockname.ln_number, lp->flags);

	/* -EBUSY: the dlm could not cancel right now; clear the flag so a
	   later cancel attempt can be made. */
	if (error == -EBUSY)
		clear_bit(LFL_DLM_CANCEL, &lp->flags);
}
605
Adrian Bunk08bc2db2006-04-28 10:59:12 -0400606static int gdlm_add_lvb(struct gdlm_lock *lp)
David Teigland29b79982006-01-16 16:52:38 +0000607{
608 char *lvb;
609
Josef Bacik16c5f062008-04-09 09:33:41 -0400610 lvb = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
David Teigland29b79982006-01-16 16:52:38 +0000611 if (!lvb)
612 return -ENOMEM;
613
David Teigland29b79982006-01-16 16:52:38 +0000614 lp->lksb.sb_lvbptr = lvb;
615 lp->lvb = lvb;
616 return 0;
617}
618
Adrian Bunk08bc2db2006-04-28 10:59:12 -0400619static void gdlm_del_lvb(struct gdlm_lock *lp)
David Teigland29b79982006-01-16 16:52:38 +0000620{
621 kfree(lp->lvb);
622 lp->lvb = NULL;
623 lp->lksb.sb_lvbptr = NULL;
624}
625
/* wait_on_bit() action routine: sleep until woken, then let
   wait_on_bit re-test the LFL_AST_WAIT bit.  Always returns 0 so the
   wait is never aborted. */
static int gdlm_ast_wait(void *word)
{
	schedule();
	return 0;
}
631
David Teigland29b79982006-01-16 16:52:38 +0000632/* This can do a synchronous dlm request (requiring a lock_dlm thread to get
633 the completion) because gfs won't call hold_lvb() during a callback (from
634 the context of a lock_dlm thread). */
635
/* Acquire an internal NL lock (lp->hold_null) on the same resource as
   @lp so the dlm keeps the resource — and its lvb — alive.  Waits
   synchronously for the completion ast via LFL_AST_WAIT/wake_up_ast().
   Returns 0 on success or the dlm status / create error. */
static int hold_null_lock(struct gdlm_lock *lp)
{
	struct gdlm_lock *lpn = NULL;
	int error;

	if (lp->hold_null) {
		printk(KERN_INFO "lock_dlm: lvb already held\n");
		return 0;
	}

	error = gdlm_create_lp(lp->ls, &lp->lockname, &lpn);
	if (error)
		goto out;

	/* The NL lock's lvb contents are irrelevant; point it at the
	   shared junk buffer rather than allocating one. */
	lpn->lksb.sb_lvbptr = junk_lvb;
	lpn->lvb = junk_lvb;

	lpn->req = DLM_LOCK_NL;
	lpn->lkf = DLM_LKF_VALBLK | DLM_LKF_EXPEDITE;
	set_bit(LFL_NOBAST, &lpn->flags);	/* no blocking callbacks */
	set_bit(LFL_INLOCK, &lpn->flags);	/* internal lock_dlm lock */
	set_bit(LFL_AST_WAIT, &lpn->flags);	/* cleared by wake_up_ast() */

	gdlm_do_lock(lpn);
	wait_on_bit(&lpn->flags, LFL_AST_WAIT, gdlm_ast_wait, TASK_UNINTERRUPTIBLE);
	error = lpn->lksb.sb_status;
	if (error) {
		printk(KERN_INFO "lock_dlm: hold_null_lock dlm error %d\n",
		       error);
		gdlm_delete_lp(lpn);
		lpn = NULL;
	}
out:
	lp->hold_null = lpn;
	return error;
}
672
673/* This cannot do a synchronous dlm request (requiring a lock_dlm thread to get
674 the completion) because gfs may call unhold_lvb() during a callback (from
675 the context of a lock_dlm thread) which could cause a deadlock since the
676 other lock_dlm thread could be engaged in recovery. */
677
/* Drop the internal NL lock created by hold_null_lock().  The lvb
   pointers are cleared first (they reference the shared junk_lvb, which
   must not be freed) and LFL_UNLOCK_DELETE makes process_complete()
   free the lock once the asynchronous unlock finishes. */
static void unhold_null_lock(struct gdlm_lock *lp)
{
	struct gdlm_lock *lpn = lp->hold_null;

	gdlm_assert(lpn, "%x,%llx", lp->lockname.ln_type,
		    (unsigned long long)lp->lockname.ln_number);
	lpn->lksb.sb_lvbptr = NULL;
	lpn->lvb = NULL;
	set_bit(LFL_UNLOCK_DELETE, &lpn->flags);
	gdlm_do_unlock(lpn);
	lp->hold_null = NULL;
}
690
691/* Acquire a NL lock because gfs requires the value block to remain
692 intact on the resource while the lvb is "held" even if it's holding no locks
693 on the resource. */
694
Steven Whitehouse9b47c112006-09-08 10:17:58 -0400695int gdlm_hold_lvb(void *lock, char **lvbp)
David Teigland29b79982006-01-16 16:52:38 +0000696{
Steven Whitehouse9b47c112006-09-08 10:17:58 -0400697 struct gdlm_lock *lp = lock;
David Teigland29b79982006-01-16 16:52:38 +0000698 int error;
699
700 error = gdlm_add_lvb(lp);
701 if (error)
702 return error;
703
704 *lvbp = lp->lvb;
705
706 error = hold_null_lock(lp);
707 if (error)
708 gdlm_del_lvb(lp);
709
710 return error;
711}
712
/* lock_module interface: undo gdlm_hold_lvb() — release the internal
   NL lock first, then free the lvb buffer. */
void gdlm_unhold_lvb(void *lock, char *lvb)
{
	struct gdlm_lock *gl = lock;

	unhold_null_lock(gl);
	gdlm_del_lvb(gl);
}
720
David Teigland29b79982006-01-16 16:52:38 +0000721void gdlm_submit_delayed(struct gdlm_ls *ls)
722{
723 struct gdlm_lock *lp, *safe;
724
725 spin_lock(&ls->async_lock);
726 list_for_each_entry_safe(lp, safe, &ls->delayed, delay_list) {
727 list_del_init(&lp->delay_list);
728 list_add_tail(&lp->delay_list, &ls->submit);
729 }
730 spin_unlock(&ls->async_lock);
731 wake_up(&ls->thread_wait);
732}
733
734int gdlm_release_all_locks(struct gdlm_ls *ls)
735{
736 struct gdlm_lock *lp, *safe;
737 int count = 0;
738
739 spin_lock(&ls->async_lock);
740 list_for_each_entry_safe(lp, safe, &ls->all_locks, all_list) {
741 list_del_init(&lp->all_list);
742
743 if (lp->lvb && lp->lvb != junk_lvb)
744 kfree(lp->lvb);
745 kfree(lp);
746 count++;
747 }
748 spin_unlock(&ls->async_lock);
749
750 return count;
751}
752