blob: 894df4567a030adafb5bc43cadbcb73c82d7f5da [file] [log] [blame]
David Teigland869d81d2006-01-17 08:47:12 +00001/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
Steven Whitehousee9fc2aa2006-09-01 11:05:15 -04007 * of the GNU General Public License version 2.
David Teigland869d81d2006-01-17 08:47:12 +00008 */
David Teigland29b79982006-01-16 16:52:38 +00009
10#include "lock_dlm.h"
11
/* Scratch lock value block attached to the internal NL "hold" locks created
   by hold_null_lock(); its contents are never examined, it only lets those
   locks pass DLM_LKF_VALBLK to the dlm. */
static char junk_lvb[GDLM_LVB_SIZE];
13
Steven Whitehousef3c9d382008-05-21 17:21:42 +010014
15/* convert dlm lock-mode to gfs lock-state */
16
17static s16 gdlm_make_lmstate(s16 dlmmode)
18{
19 switch (dlmmode) {
20 case DLM_LOCK_IV:
21 case DLM_LOCK_NL:
22 return LM_ST_UNLOCKED;
23 case DLM_LOCK_EX:
24 return LM_ST_EXCLUSIVE;
25 case DLM_LOCK_CW:
26 return LM_ST_DEFERRED;
27 case DLM_LOCK_PR:
28 return LM_ST_SHARED;
29 }
30 gdlm_assert(0, "unknown DLM mode %d", dlmmode);
31 return -1;
32}
33
34/* A lock placed on this queue is re-submitted to DLM as soon as the lock_dlm
35 thread gets to it. */
36
37static void queue_submit(struct gdlm_lock *lp)
David Teigland29b79982006-01-16 16:52:38 +000038{
39 struct gdlm_ls *ls = lp->ls;
40
David Teigland29b79982006-01-16 16:52:38 +000041 spin_lock(&ls->async_lock);
Steven Whitehousef3c9d382008-05-21 17:21:42 +010042 list_add_tail(&lp->delay_list, &ls->submit);
David Teigland29b79982006-01-16 16:52:38 +000043 spin_unlock(&ls->async_lock);
44 wake_up(&ls->thread_wait);
45}
46
/* Wake a task sleeping in wait_on_bit() on LFL_AST_WAIT.  The memory
   barrier must sit between clearing the bit and the wake so the waiter
   cannot observe the wakeup while still seeing the bit set. */
static void wake_up_ast(struct gdlm_lock *lp)
{
	clear_bit(LFL_AST_WAIT, &lp->flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&lp->flags, LFL_AST_WAIT);
}
53
Steven Whitehousef3c9d382008-05-21 17:21:42 +010054static void gdlm_delete_lp(struct gdlm_lock *lp)
55{
56 struct gdlm_ls *ls = lp->ls;
57
58 spin_lock(&ls->async_lock);
59 if (!list_empty(&lp->delay_list))
60 list_del_init(&lp->delay_list);
61 gdlm_assert(!list_empty(&lp->all_list), "%x,%llx", lp->lockname.ln_type,
62 (unsigned long long)lp->lockname.ln_number);
63 list_del_init(&lp->all_list);
64 ls->all_locks_count--;
65 spin_unlock(&ls->async_lock);
66
67 kfree(lp);
68}
69
70static void gdlm_queue_delayed(struct gdlm_lock *lp)
71{
72 struct gdlm_ls *ls = lp->ls;
73
74 spin_lock(&ls->async_lock);
75 list_add_tail(&lp->delay_list, &ls->delayed);
76 spin_unlock(&ls->async_lock);
77}
78
/*
 * Handle a dlm completion AST for @lp: sort out cancels, unlocks, errors
 * and recovery-time re-requests, then (on the normal path) report the new
 * lock state back to GFS through ls->fscb.  The checks below are strictly
 * ordered; do not reorder them.
 */
static void process_complete(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;
	struct lm_async_cb acb;

	memset(&acb, 0, sizeof(acb));

	/* dlm reported the request was canceled; roll back to the old mode */
	if (lp->lksb.sb_status == -DLM_ECANCEL) {
		log_info("complete dlm cancel %x,%llx flags %lx",
			 lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number,
			 lp->flags);

		lp->req = lp->cur;
		acb.lc_ret |= LM_OUT_CANCELED;
		if (lp->cur == DLM_LOCK_IV)
			lp->lksb.sb_lkid = 0;
		goto out;
	}

	/* completion of a dlm_unlock() we issued */
	if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) {
		if (lp->lksb.sb_status != -DLM_EUNLOCK) {
			log_info("unlock sb_status %d %x,%llx flags %lx",
				 lp->lksb.sb_status, lp->lockname.ln_type,
				 (unsigned long long)lp->lockname.ln_number,
				 lp->flags);
			return;
		}

		lp->cur = DLM_LOCK_IV;
		lp->req = DLM_LOCK_IV;
		lp->lksb.sb_lkid = 0;

		/* unlock-then-free requested (see unhold_null_lock()) */
		if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) {
			gdlm_delete_lp(lp);
			return;
		}
		goto out;
	}

	/* dlm says the lvb content is stale; wipe it */
	if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID)
		memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);

	/* the alternate mode (DLM_LKF_ALTPR/ALTCW) was granted instead */
	if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) {
		if (lp->req == DLM_LOCK_PR)
			lp->req = DLM_LOCK_CW;
		else if (lp->req == DLM_LOCK_CW)
			lp->req = DLM_LOCK_PR;
	}

	/*
	 * A canceled lock request.  The lock was just taken off the delayed
	 * list and was never even submitted to dlm.
	 */

	if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) {
		log_info("complete internal cancel %x,%llx",
			 lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number);
		lp->req = lp->cur;
		acb.lc_ret |= LM_OUT_CANCELED;
		goto out;
	}

	/*
	 * An error occurred.
	 */

	if (lp->lksb.sb_status) {
		/* a "normal" error: -EAGAIN from a NOQUEUE (try) request */
		if ((lp->lksb.sb_status == -EAGAIN) &&
		    (lp->lkf & DLM_LKF_NOQUEUE)) {
			lp->req = lp->cur;
			if (lp->cur == DLM_LOCK_IV)
				lp->lksb.sb_lkid = 0;
			goto out;
		}

		/* this could only happen with cancels I think */
		log_info("ast sb_status %d %x,%llx flags %lx",
			 lp->lksb.sb_status, lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number,
			 lp->flags);
		return;
	}

	/*
	 * This is an AST for an EX->EX conversion for sync_lvb from GFS.
	 */

	if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) {
		wake_up_ast(lp);
		return;
	}

	/*
	 * A lock has been demoted to NL because it initially completed during
	 * BLOCK_LOCKS.  Now it must be requested in the originally requested
	 * mode.
	 */

	if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) {
		gdlm_assert(lp->req == DLM_LOCK_NL, "%x,%llx",
			    lp->lockname.ln_type,
			    (unsigned long long)lp->lockname.ln_number);
		gdlm_assert(lp->prev_req > DLM_LOCK_NL, "%x,%llx",
			    lp->lockname.ln_type,
			    (unsigned long long)lp->lockname.ln_number);

		lp->cur = DLM_LOCK_NL;
		lp->req = lp->prev_req;
		lp->prev_req = DLM_LOCK_IV;
		lp->lkf &= ~DLM_LKF_CONVDEADLK;

		set_bit(LFL_NOCACHE, &lp->flags);

		if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
		    !test_bit(LFL_NOBLOCK, &lp->flags))
			gdlm_queue_delayed(lp);
		else
			queue_submit(lp);
		return;
	}

	/*
	 * A request is granted during dlm recovery.  It may be granted
	 * because the locks of a failed node were cleared.  In that case,
	 * there may be inconsistent data beneath this lock and we must wait
	 * for recovery to complete to use it.  When gfs recovery is done this
	 * granted lock will be converted to NL and then reacquired in this
	 * granted state.
	 */

	if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
	    !test_bit(LFL_NOBLOCK, &lp->flags) &&
	    lp->req != DLM_LOCK_NL) {

		lp->cur = lp->req;
		lp->prev_req = lp->req;
		lp->req = DLM_LOCK_NL;
		lp->lkf |= DLM_LKF_CONVERT;
		lp->lkf &= ~DLM_LKF_CONVDEADLK;

		log_debug("rereq %x,%llx id %x %d,%d",
			  lp->lockname.ln_type,
			  (unsigned long long)lp->lockname.ln_number,
			  lp->lksb.sb_lkid, lp->cur, lp->req);

		set_bit(LFL_REREQUEST, &lp->flags);
		queue_submit(lp);
		return;
	}

	/*
	 * DLM demoted the lock to NL before it was granted so GFS must be
	 * told it cannot cache data for this lock.
	 */

	if (lp->lksb.sb_flags & DLM_SBF_DEMOTED)
		set_bit(LFL_NOCACHE, &lp->flags);

out:
	/*
	 * This is an internal lock_dlm lock: wake the waiter instead of
	 * calling back into GFS.
	 */

	if (test_bit(LFL_INLOCK, &lp->flags)) {
		clear_bit(LFL_NOBLOCK, &lp->flags);
		lp->cur = lp->req;
		wake_up_ast(lp);
		return;
	}

	/*
	 * Normal completion of a lock request.  Tell GFS it now has the lock.
	 */

	clear_bit(LFL_NOBLOCK, &lp->flags);
	lp->cur = lp->req;

	acb.lc_name = lp->lockname;
	acb.lc_ret |= gdlm_make_lmstate(lp->cur);

	ls->fscb(ls->sdp, LM_CB_ASYNC, &acb);
}
264
265static void gdlm_ast(void *astarg)
David Teigland29b79982006-01-16 16:52:38 +0000266{
267 struct gdlm_lock *lp = astarg;
Steven Whitehousef3c9d382008-05-21 17:21:42 +0100268 clear_bit(LFL_ACTIVE, &lp->flags);
269 process_complete(lp);
270}
271
272static void process_blocking(struct gdlm_lock *lp, int bast_mode)
273{
David Teigland29b79982006-01-16 16:52:38 +0000274 struct gdlm_ls *ls = lp->ls;
Steven Whitehousef3c9d382008-05-21 17:21:42 +0100275 unsigned int cb = 0;
276
277 switch (gdlm_make_lmstate(bast_mode)) {
278 case LM_ST_EXCLUSIVE:
279 cb = LM_CB_NEED_E;
280 break;
281 case LM_ST_DEFERRED:
282 cb = LM_CB_NEED_D;
283 break;
284 case LM_ST_SHARED:
285 cb = LM_CB_NEED_S;
286 break;
287 default:
288 gdlm_assert(0, "unknown bast mode %u", bast_mode);
289 }
290
291 ls->fscb(ls->sdp, cb, &lp->lockname);
292}
293
294
295static void gdlm_bast(void *astarg, int mode)
296{
297 struct gdlm_lock *lp = astarg;
David Teigland29b79982006-01-16 16:52:38 +0000298
299 if (!mode) {
Steven Whitehoused92a8d42006-02-27 10:57:14 -0500300 printk(KERN_INFO "lock_dlm: bast mode zero %x,%llx\n",
David Teigland9229f012006-05-24 09:21:30 -0400301 lp->lockname.ln_type,
302 (unsigned long long)lp->lockname.ln_number);
David Teigland29b79982006-01-16 16:52:38 +0000303 return;
304 }
305
Steven Whitehousef3c9d382008-05-21 17:21:42 +0100306 process_blocking(lp, mode);
David Teigland29b79982006-01-16 16:52:38 +0000307}
308
309/* convert gfs lock-state to dlm lock-mode */
310
Steven Whitehousecd915492006-09-04 12:49:07 -0400311static s16 make_mode(s16 lmstate)
David Teigland29b79982006-01-16 16:52:38 +0000312{
313 switch (lmstate) {
314 case LM_ST_UNLOCKED:
315 return DLM_LOCK_NL;
316 case LM_ST_EXCLUSIVE:
317 return DLM_LOCK_EX;
318 case LM_ST_DEFERRED:
319 return DLM_LOCK_CW;
320 case LM_ST_SHARED:
321 return DLM_LOCK_PR;
David Teigland29b79982006-01-16 16:52:38 +0000322 }
David Teigland869d81d2006-01-17 08:47:12 +0000323 gdlm_assert(0, "unknown LM state %d", lmstate);
324 return -1;
David Teigland29b79982006-01-16 16:52:38 +0000325}
326
David Teigland29b79982006-01-16 16:52:38 +0000327
328/* verify agreement with GFS on the current lock state, NB: DLM_LOCK_NL and
329 DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */
330
331static void check_cur_state(struct gdlm_lock *lp, unsigned int cur_state)
332{
Steven Whitehousecd915492006-09-04 12:49:07 -0400333 s16 cur = make_mode(cur_state);
David Teigland29b79982006-01-16 16:52:38 +0000334 if (lp->cur != DLM_LOCK_IV)
David Teigland869d81d2006-01-17 08:47:12 +0000335 gdlm_assert(lp->cur == cur, "%d, %d", lp->cur, cur);
David Teigland29b79982006-01-16 16:52:38 +0000336}
337
338static inline unsigned int make_flags(struct gdlm_lock *lp,
339 unsigned int gfs_flags,
Steven Whitehousecd915492006-09-04 12:49:07 -0400340 s16 cur, s16 req)
David Teigland29b79982006-01-16 16:52:38 +0000341{
342 unsigned int lkf = 0;
343
344 if (gfs_flags & LM_FLAG_TRY)
345 lkf |= DLM_LKF_NOQUEUE;
346
347 if (gfs_flags & LM_FLAG_TRY_1CB) {
348 lkf |= DLM_LKF_NOQUEUE;
349 lkf |= DLM_LKF_NOQUEUEBAST;
350 }
351
352 if (gfs_flags & LM_FLAG_PRIORITY) {
353 lkf |= DLM_LKF_NOORDER;
354 lkf |= DLM_LKF_HEADQUE;
355 }
356
357 if (gfs_flags & LM_FLAG_ANY) {
358 if (req == DLM_LOCK_PR)
359 lkf |= DLM_LKF_ALTCW;
360 else if (req == DLM_LOCK_CW)
361 lkf |= DLM_LKF_ALTPR;
362 }
363
364 if (lp->lksb.sb_lkid != 0) {
365 lkf |= DLM_LKF_CONVERT;
David Teigland29b79982006-01-16 16:52:38 +0000366 }
367
368 if (lp->lvb)
369 lkf |= DLM_LKF_VALBLK;
370
371 return lkf;
372}
373
374/* make_strname - convert GFS lock numbers to a string */
375
/* Format the GFS lock numbers into the fixed-width dlm resource name.
   The "%8x%16llx" format always produces GDLM_STRNAME_BYTES characters,
   which is why namelen is a constant rather than the sprintf result. */
static inline void make_strname(const struct lm_lockname *lockname,
				struct gdlm_strname *str)
{
	sprintf(str->name, "%8x%16llx", lockname->ln_type,
		(unsigned long long)lockname->ln_number);
	str->namelen = GDLM_STRNAME_BYTES;
}
383
Adrian Bunk08bc2db2006-04-28 10:59:12 -0400384static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
385 struct gdlm_lock **lpp)
David Teigland29b79982006-01-16 16:52:38 +0000386{
387 struct gdlm_lock *lp;
388
Josef Bacik16c5f062008-04-09 09:33:41 -0400389 lp = kzalloc(sizeof(struct gdlm_lock), GFP_NOFS);
David Teigland29b79982006-01-16 16:52:38 +0000390 if (!lp)
391 return -ENOMEM;
392
David Teigland29b79982006-01-16 16:52:38 +0000393 lp->lockname = *name;
Steven Whitehousef35ac342007-03-18 17:04:15 +0000394 make_strname(name, &lp->strname);
David Teigland29b79982006-01-16 16:52:38 +0000395 lp->ls = ls;
396 lp->cur = DLM_LOCK_IV;
David Teigland29b79982006-01-16 16:52:38 +0000397 INIT_LIST_HEAD(&lp->delay_list);
398
399 spin_lock(&ls->async_lock);
400 list_add(&lp->all_list, &ls->all_locks);
401 ls->all_locks_count++;
402 spin_unlock(&ls->async_lock);
403
404 *lpp = lp;
405 return 0;
406}
407
Steven Whitehouse9b47c112006-09-08 10:17:58 -0400408int gdlm_get_lock(void *lockspace, struct lm_lockname *name,
409 void **lockp)
David Teigland29b79982006-01-16 16:52:38 +0000410{
411 struct gdlm_lock *lp;
412 int error;
413
Steven Whitehouse9b47c112006-09-08 10:17:58 -0400414 error = gdlm_create_lp(lockspace, name, &lp);
David Teigland29b79982006-01-16 16:52:38 +0000415
Steven Whitehouse9b47c112006-09-08 10:17:58 -0400416 *lockp = lp;
David Teigland29b79982006-01-16 16:52:38 +0000417 return error;
418}
419
/* GFS lock-module entry: release a lock object obtained from
   gdlm_get_lock(). */
void gdlm_put_lock(void *lock)
{
	struct gdlm_lock *lp = lock;

	gdlm_delete_lp(lp);
}
424
/*
 * Submit @lp's pending request (lp->req/lp->lkf) to the dlm.  Returns
 * LM_OUT_ASYNC when the result will arrive via gdlm_ast(), or LM_OUT_ERROR
 * on a submission failure.
 */
unsigned int gdlm_do_lock(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;
	int error, bast = 1;

	/*
	 * When recovery is in progress, delay lock requests for submission
	 * once recovery is done.  Requests for recovery (NOEXP) and unlocks
	 * can pass.
	 */

	if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
	    !test_bit(LFL_NOBLOCK, &lp->flags) && lp->req != DLM_LOCK_NL) {
		gdlm_queue_delayed(lp);
		return LM_OUT_ASYNC;
	}

	/*
	 * Submit the actual lock request.
	 */

	/* internal locks (e.g. the hold-lvb NL lock) take no blocking ASTs */
	if (test_bit(LFL_NOBAST, &lp->flags))
		bast = 0;

	set_bit(LFL_ACTIVE, &lp->flags);

	log_debug("lk %x,%llx id %x %d,%d %x", lp->lockname.ln_type,
		  (unsigned long long)lp->lockname.ln_number, lp->lksb.sb_lkid,
		  lp->cur, lp->req, lp->lkf);

	error = dlm_lock(ls->dlm_lockspace, lp->req, &lp->lksb, lp->lkf,
			 lp->strname.name, lp->strname.namelen, 0, gdlm_ast,
			 lp, bast ? gdlm_bast : NULL);

	/* a synchronous -EAGAIN from a NOQUEUE request is delivered to the
	   completion path as if it had arrived asynchronously */
	if ((error == -EAGAIN) && (lp->lkf & DLM_LKF_NOQUEUE)) {
		lp->lksb.sb_status = -EAGAIN;
		gdlm_ast(lp);
		error = 0;
	}

	if (error) {
		log_error("%s: gdlm_lock %x,%llx err=%d cur=%d req=%d lkf=%x "
			  "flags=%lx", ls->fsname, lp->lockname.ln_type,
			  (unsigned long long)lp->lockname.ln_number, error,
			  lp->cur, lp->req, lp->lkf, lp->flags);
		return LM_OUT_ERROR;
	}
	return LM_OUT_ASYNC;
}
474
Adrian Bunk08bc2db2006-04-28 10:59:12 -0400475static unsigned int gdlm_do_unlock(struct gdlm_lock *lp)
David Teigland29b79982006-01-16 16:52:38 +0000476{
David Teigland869d81d2006-01-17 08:47:12 +0000477 struct gdlm_ls *ls = lp->ls;
David Teigland29b79982006-01-16 16:52:38 +0000478 unsigned int lkf = 0;
479 int error;
480
481 set_bit(LFL_DLM_UNLOCK, &lp->flags);
482 set_bit(LFL_ACTIVE, &lp->flags);
483
484 if (lp->lvb)
485 lkf = DLM_LKF_VALBLK;
486
David Teigland869d81d2006-01-17 08:47:12 +0000487 log_debug("un %x,%llx %x %d %x", lp->lockname.ln_type,
David Teigland9229f012006-05-24 09:21:30 -0400488 (unsigned long long)lp->lockname.ln_number,
489 lp->lksb.sb_lkid, lp->cur, lkf);
David Teigland29b79982006-01-16 16:52:38 +0000490
David Teigland869d81d2006-01-17 08:47:12 +0000491 error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, lkf, NULL, lp);
David Teigland29b79982006-01-16 16:52:38 +0000492
David Teigland869d81d2006-01-17 08:47:12 +0000493 if (error) {
David Teiglandb9af8a72007-03-28 11:08:04 -0500494 log_error("%s: gdlm_unlock %x,%llx err=%d cur=%d req=%d lkf=%x "
David Teigland869d81d2006-01-17 08:47:12 +0000495 "flags=%lx", ls->fsname, lp->lockname.ln_type,
David Teigland9229f012006-05-24 09:21:30 -0400496 (unsigned long long)lp->lockname.ln_number, error,
497 lp->cur, lp->req, lp->lkf, lp->flags);
David Teigland869d81d2006-01-17 08:47:12 +0000498 return LM_OUT_ERROR;
499 }
500 return LM_OUT_ASYNC;
David Teigland29b79982006-01-16 16:52:38 +0000501}
502
Steven Whitehouse9b47c112006-09-08 10:17:58 -0400503unsigned int gdlm_lock(void *lock, unsigned int cur_state,
David Teigland29b79982006-01-16 16:52:38 +0000504 unsigned int req_state, unsigned int flags)
505{
Steven Whitehouse9b47c112006-09-08 10:17:58 -0400506 struct gdlm_lock *lp = lock;
David Teigland29b79982006-01-16 16:52:38 +0000507
Steven Whitehouse6802e342008-05-21 17:03:22 +0100508 if (req_state == LM_ST_UNLOCKED)
509 return gdlm_unlock(lock, cur_state);
510
Steven Whitehousef3c9d382008-05-21 17:21:42 +0100511 if (req_state == LM_ST_UNLOCKED)
512 return gdlm_unlock(lock, cur_state);
513
David Teigland29b79982006-01-16 16:52:38 +0000514 clear_bit(LFL_DLM_CANCEL, &lp->flags);
515 if (flags & LM_FLAG_NOEXP)
516 set_bit(LFL_NOBLOCK, &lp->flags);
517
518 check_cur_state(lp, cur_state);
519 lp->req = make_mode(req_state);
520 lp->lkf = make_flags(lp, flags, lp->cur, lp->req);
521
David Teigland8d3b35a2006-02-23 10:00:56 +0000522 return gdlm_do_lock(lp);
David Teigland29b79982006-01-16 16:52:38 +0000523}
524
Steven Whitehouse9b47c112006-09-08 10:17:58 -0400525unsigned int gdlm_unlock(void *lock, unsigned int cur_state)
David Teigland29b79982006-01-16 16:52:38 +0000526{
Steven Whitehouse9b47c112006-09-08 10:17:58 -0400527 struct gdlm_lock *lp = lock;
David Teigland29b79982006-01-16 16:52:38 +0000528
529 clear_bit(LFL_DLM_CANCEL, &lp->flags);
530 if (lp->cur == DLM_LOCK_IV)
531 return 0;
David Teigland869d81d2006-01-17 08:47:12 +0000532 return gdlm_do_unlock(lp);
David Teigland29b79982006-01-16 16:52:38 +0000533}
534
/*
 * GFS lock-module entry: try to cancel an in-flight request on @lock.
 * Three cases: the request is still on our delayed list (cancel it
 * internally without involving the dlm), it is not actually in flight
 * (nothing to do), or it is blocked inside the dlm (issue DLM_LKF_CANCEL).
 */
void gdlm_cancel(void *lock)
{
	struct gdlm_lock *lp = lock;
	struct gdlm_ls *ls = lp->ls;
	int error, delay_list = 0;

	/* a dlm cancel is already outstanding */
	if (test_bit(LFL_DLM_CANCEL, &lp->flags))
		return;

	log_info("gdlm_cancel %x,%llx flags %lx", lp->lockname.ln_type,
		 (unsigned long long)lp->lockname.ln_number, lp->flags);

	spin_lock(&ls->async_lock);
	if (!list_empty(&lp->delay_list)) {
		list_del_init(&lp->delay_list);
		delay_list = 1;
	}
	spin_unlock(&ls->async_lock);

	/* never reached the dlm: complete it ourselves as an internal
	   cancel (process_complete() sees LFL_CANCEL) */
	if (delay_list) {
		set_bit(LFL_CANCEL, &lp->flags);
		set_bit(LFL_ACTIVE, &lp->flags);
		gdlm_ast(lp);
		return;
	}

	if (!test_bit(LFL_ACTIVE, &lp->flags) ||
	    test_bit(LFL_DLM_UNLOCK, &lp->flags)) {
		log_info("gdlm_cancel skip %x,%llx flags %lx",
			 lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number, lp->flags);
		return;
	}

	/* the lock is blocked in the dlm */

	set_bit(LFL_DLM_CANCEL, &lp->flags);
	set_bit(LFL_ACTIVE, &lp->flags);

	error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, DLM_LKF_CANCEL,
			   NULL, lp);

	log_info("gdlm_cancel rv %d %x,%llx flags %lx", error,
		 lp->lockname.ln_type,
		 (unsigned long long)lp->lockname.ln_number, lp->flags);

	/* -EBUSY: the dlm could not cancel now; clear the flag so a later
	   cancel attempt is allowed */
	if (error == -EBUSY)
		clear_bit(LFL_DLM_CANCEL, &lp->flags);
}
584
Adrian Bunk08bc2db2006-04-28 10:59:12 -0400585static int gdlm_add_lvb(struct gdlm_lock *lp)
David Teigland29b79982006-01-16 16:52:38 +0000586{
587 char *lvb;
588
Josef Bacik16c5f062008-04-09 09:33:41 -0400589 lvb = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
David Teigland29b79982006-01-16 16:52:38 +0000590 if (!lvb)
591 return -ENOMEM;
592
David Teigland29b79982006-01-16 16:52:38 +0000593 lp->lksb.sb_lvbptr = lvb;
594 lp->lvb = lvb;
595 return 0;
596}
597
Adrian Bunk08bc2db2006-04-28 10:59:12 -0400598static void gdlm_del_lvb(struct gdlm_lock *lp)
David Teigland29b79982006-01-16 16:52:38 +0000599{
600 kfree(lp->lvb);
601 lp->lvb = NULL;
602 lp->lksb.sb_lvbptr = NULL;
603}
604
/* wait_on_bit() action: just sleep until woken; never abort the wait. */
static int gdlm_ast_wait(void *word)
{
	schedule();
	return 0;
}
610
David Teigland29b79982006-01-16 16:52:38 +0000611/* This can do a synchronous dlm request (requiring a lock_dlm thread to get
612 the completion) because gfs won't call hold_lvb() during a callback (from
613 the context of a lock_dlm thread). */
614
/*
 * Acquire the internal NL lock that pins the lvb for @lp (see the comment
 * above: this may block waiting for the lock_dlm thread, so it must not be
 * called from a callback context).  The flag setup before gdlm_do_lock()
 * matters: NOBAST/INLOCK mark it internal, AST_WAIT arms the wait below.
 * Returns 0 on success or a dlm status/errno on failure.
 */
static int hold_null_lock(struct gdlm_lock *lp)
{
	struct gdlm_lock *lpn = NULL;
	int error;

	if (lp->hold_null) {
		printk(KERN_INFO "lock_dlm: lvb already held\n");
		return 0;
	}

	error = gdlm_create_lp(lp->ls, &lp->lockname, &lpn);
	if (error)
		goto out;

	/* the NL lock needs some lvb to pass DLM_LKF_VALBLK; its contents
	   are irrelevant */
	lpn->lksb.sb_lvbptr = junk_lvb;
	lpn->lvb = junk_lvb;

	lpn->req = DLM_LOCK_NL;
	lpn->lkf = DLM_LKF_VALBLK | DLM_LKF_EXPEDITE;
	set_bit(LFL_NOBAST, &lpn->flags);
	set_bit(LFL_INLOCK, &lpn->flags);
	set_bit(LFL_AST_WAIT, &lpn->flags);

	gdlm_do_lock(lpn);
	wait_on_bit(&lpn->flags, LFL_AST_WAIT, gdlm_ast_wait, TASK_UNINTERRUPTIBLE);
	error = lpn->lksb.sb_status;
	if (error) {
		printk(KERN_INFO "lock_dlm: hold_null_lock dlm error %d\n",
		       error);
		gdlm_delete_lp(lpn);
		lpn = NULL;
	}
out:
	lp->hold_null = lpn;
	return error;
}
651
652/* This cannot do a synchronous dlm request (requiring a lock_dlm thread to get
653 the completion) because gfs may call unhold_lvb() during a callback (from
654 the context of a lock_dlm thread) which could cause a deadlock since the
655 other lock_dlm thread could be engaged in recovery. */
656
657static void unhold_null_lock(struct gdlm_lock *lp)
658{
659 struct gdlm_lock *lpn = lp->hold_null;
660
David Teigland9229f012006-05-24 09:21:30 -0400661 gdlm_assert(lpn, "%x,%llx", lp->lockname.ln_type,
662 (unsigned long long)lp->lockname.ln_number);
David Teigland29b79982006-01-16 16:52:38 +0000663 lpn->lksb.sb_lvbptr = NULL;
664 lpn->lvb = NULL;
665 set_bit(LFL_UNLOCK_DELETE, &lpn->flags);
666 gdlm_do_unlock(lpn);
667 lp->hold_null = NULL;
668}
669
670/* Acquire a NL lock because gfs requires the value block to remain
671 intact on the resource while the lvb is "held" even if it's holding no locks
672 on the resource. */
673
Steven Whitehouse9b47c112006-09-08 10:17:58 -0400674int gdlm_hold_lvb(void *lock, char **lvbp)
David Teigland29b79982006-01-16 16:52:38 +0000675{
Steven Whitehouse9b47c112006-09-08 10:17:58 -0400676 struct gdlm_lock *lp = lock;
David Teigland29b79982006-01-16 16:52:38 +0000677 int error;
678
679 error = gdlm_add_lvb(lp);
680 if (error)
681 return error;
682
683 *lvbp = lp->lvb;
684
685 error = hold_null_lock(lp);
686 if (error)
687 gdlm_del_lvb(lp);
688
689 return error;
690}
691
/* GFS lock-module entry: undo gdlm_hold_lvb() — drop the internal NL
   lock first, then free the lvb buffer. */
void gdlm_unhold_lvb(void *lock, char *lvb)
{
	struct gdlm_lock *lp = lock;

	unhold_null_lock(lp);
	gdlm_del_lvb(lp);
}
699
David Teigland29b79982006-01-16 16:52:38 +0000700void gdlm_submit_delayed(struct gdlm_ls *ls)
701{
702 struct gdlm_lock *lp, *safe;
703
704 spin_lock(&ls->async_lock);
705 list_for_each_entry_safe(lp, safe, &ls->delayed, delay_list) {
706 list_del_init(&lp->delay_list);
707 list_add_tail(&lp->delay_list, &ls->submit);
708 }
709 spin_unlock(&ls->async_lock);
710 wake_up(&ls->thread_wait);
711}
712
713int gdlm_release_all_locks(struct gdlm_ls *ls)
714{
715 struct gdlm_lock *lp, *safe;
716 int count = 0;
717
718 spin_lock(&ls->async_lock);
719 list_for_each_entry_safe(lp, safe, &ls->all_locks, all_list) {
720 list_del_init(&lp->all_list);
721
722 if (lp->lvb && lp->lvb != junk_lvb)
723 kfree(lp->lvb);
724 kfree(lp);
725 count++;
726 }
727 spin_unlock(&ls->async_lock);
728
729 return count;
730}
731