/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "lock_dlm.h"

static char junk_lvb[GDLM_LVB_SIZE];

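/* Queue a lock for the lock_dlm thread to process as a completion: clear
   the ACTIVE flag, add the lock to the lockspace's complete list, and wake
   the thread. */
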
static void queue_complete(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;

	clear_bit(LFL_ACTIVE, &lp->flags);

	spin_lock(&ls->async_lock);
	list_add_tail(&lp->clist, &ls->complete);
	spin_unlock(&ls->async_lock);
	wake_up(&ls->thread_wait);
}

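/* dlm completion ast: the request identified by astarg has finished */
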
static inline void gdlm_ast(void *astarg)
{
	queue_complete((struct gdlm_lock *) astarg);
}

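/* dlm blocking ast: a request on another node is blocked on the mode we
   hold.  Remember the highest conflicting mode requested and wake the
   lock_dlm thread to pass the callback on to gfs. */
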
static inline void gdlm_bast(void *astarg, int mode)
{
	struct gdlm_lock *lp = astarg;
	struct gdlm_ls *ls = lp->ls;

	if (!mode) {
		printk("lock_dlm: bast mode zero %x,%"PRIx64"\n",
		       lp->lockname.ln_type, lp->lockname.ln_number);
		return;
	}

	spin_lock(&ls->async_lock);
	if (!lp->bast_mode) {
		list_add_tail(&lp->blist, &ls->blocking);
		lp->bast_mode = mode;
	} else if (lp->bast_mode < mode)
		lp->bast_mode = mode;
	spin_unlock(&ls->async_lock);
	wake_up(&ls->thread_wait);
}

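/* Add a lock to the delayed list; gdlm_submit_delayed() moves these to the
   submit list to be resubmitted once recovery completes. */
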
void gdlm_queue_delayed(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;

	spin_lock(&ls->async_lock);
	list_add_tail(&lp->delay_list, &ls->delayed);
	spin_unlock(&ls->async_lock);
}

/* convert gfs lock-state to dlm lock-mode */

static int16_t make_mode(int16_t lmstate)
{
	switch (lmstate) {
	case LM_ST_UNLOCKED:
		return DLM_LOCK_NL;
	case LM_ST_EXCLUSIVE:
		return DLM_LOCK_EX;
	case LM_ST_DEFERRED:
		return DLM_LOCK_CW;
	case LM_ST_SHARED:
		return DLM_LOCK_PR;
	default:
		GDLM_ASSERT(0, printk("unknown LM state %d\n", lmstate););
	}
}

/* convert dlm lock-mode to gfs lock-state */

int16_t gdlm_make_lmstate(int16_t dlmmode)
{
	switch (dlmmode) {
	case DLM_LOCK_IV:
	case DLM_LOCK_NL:
		return LM_ST_UNLOCKED;
	case DLM_LOCK_EX:
		return LM_ST_EXCLUSIVE;
	case DLM_LOCK_CW:
		return LM_ST_DEFERRED;
	case DLM_LOCK_PR:
		return LM_ST_SHARED;
	default:
		GDLM_ASSERT(0, printk("unknown DLM mode %d\n", dlmmode););
	}
}

/* verify agreement with GFS on the current lock state, NB: DLM_LOCK_NL and
   DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */

static void check_cur_state(struct gdlm_lock *lp, unsigned int cur_state)
{
	int16_t cur = make_mode(cur_state);
	if (lp->cur != DLM_LOCK_IV)
		GDLM_ASSERT(lp->cur == cur, printk("%d, %d\n", lp->cur, cur););
}

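/* translate gfs lock flags and the cur -> req mode transition into dlm
   request flags (DLM_LKF_*) */
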
static inline unsigned int make_flags(struct gdlm_lock *lp,
				      unsigned int gfs_flags,
				      int16_t cur, int16_t req)
{
	unsigned int lkf = 0;

	if (gfs_flags & LM_FLAG_TRY)
		lkf |= DLM_LKF_NOQUEUE;

	if (gfs_flags & LM_FLAG_TRY_1CB) {
		lkf |= DLM_LKF_NOQUEUE;
		lkf |= DLM_LKF_NOQUEUEBAST;
	}

	if (gfs_flags & LM_FLAG_PRIORITY) {
		lkf |= DLM_LKF_NOORDER;
		lkf |= DLM_LKF_HEADQUE;
	}

	if (gfs_flags & LM_FLAG_ANY) {
		if (req == DLM_LOCK_PR)
			lkf |= DLM_LKF_ALTCW;
		else if (req == DLM_LOCK_CW)
			lkf |= DLM_LKF_ALTPR;
	}

	if (lp->lksb.sb_lkid != 0) {
		lkf |= DLM_LKF_CONVERT;

		/* Conversion deadlock avoidance by DLM */

		if (!test_bit(LFL_FORCE_PROMOTE, &lp->flags) &&
		    !(lkf & DLM_LKF_NOQUEUE) &&
		    cur > DLM_LOCK_NL && req > DLM_LOCK_NL && cur != req)
			lkf |= DLM_LKF_CONVDEADLK;
	}

	if (lp->lvb)
		lkf |= DLM_LKF_VALBLK;

	return lkf;
}

/* make_strname - convert GFS lock numbers to a fixed-width string.  The
   format pads with spaces, so every dlm resource name is the full
   GDLM_STRNAME_BYTES long regardless of the printed length of the numbers. */

static inline void make_strname(struct lm_lockname *lockname,
				struct gdlm_strname *str)
{
	sprintf(str->name, "%8x%16"PRIx64, lockname->ln_type,
		lockname->ln_number);
	str->namelen = GDLM_STRNAME_BYTES;
}

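/* Allocate and initialize a gdlm_lock and add it to the lockspace's list
   of all locks. */
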
int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
		   struct gdlm_lock **lpp)
{
	struct gdlm_lock *lp;

	lp = kzalloc(sizeof(struct gdlm_lock), GFP_KERNEL);
	if (!lp)
		return -ENOMEM;

	lp->lockname = *name;
	lp->ls = ls;
	lp->cur = DLM_LOCK_IV;
	lp->lvb = NULL;
	lp->hold_null = NULL;
	init_completion(&lp->ast_wait);
	INIT_LIST_HEAD(&lp->clist);
	INIT_LIST_HEAD(&lp->blist);
	INIT_LIST_HEAD(&lp->delay_list);

	spin_lock(&ls->async_lock);
	list_add(&lp->all_list, &ls->all_locks);
	ls->all_locks_count++;
	spin_unlock(&ls->async_lock);

	*lpp = lp;
	return 0;
}

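/* Remove a lock from any list it may be on and free it. */
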
void gdlm_delete_lp(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;

	spin_lock(&ls->async_lock);
	if (!list_empty(&lp->clist))
		list_del_init(&lp->clist);
	if (!list_empty(&lp->blist))
		list_del_init(&lp->blist);
	if (!list_empty(&lp->delay_list))
		list_del_init(&lp->delay_list);
	GDLM_ASSERT(!list_empty(&lp->all_list),);
	list_del_init(&lp->all_list);
	ls->all_locks_count--;
	spin_unlock(&ls->async_lock);

	kfree(lp);
}

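/* lm_lockops entry points: gfs creates and destroys its per-lock objects
   through these. */
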
int gdlm_get_lock(lm_lockspace_t *lockspace, struct lm_lockname *name,
		  lm_lock_t **lockp)
{
	struct gdlm_lock *lp;
	int error;

	error = gdlm_create_lp((struct gdlm_ls *) lockspace, name, &lp);

	*lockp = (lm_lock_t *) lp;
	return error;
}

void gdlm_put_lock(lm_lock_t *lock)
{
	gdlm_delete_lp((struct gdlm_lock *) lock);
}

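/* Submit a request to the dlm.  The result is delivered asynchronously
   through gdlm_ast(); a NOQUEUE request that cannot be granted immediately
   is completed here with -EAGAIN. */
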
void gdlm_do_lock(struct gdlm_lock *lp, struct dlm_range *range)
{
	struct gdlm_ls *ls = lp->ls;
	struct gdlm_strname str;
	int error, bast = 1;

	/*
	 * While recovery is in progress, delay lock requests so they are
	 * submitted once recovery is done.  Requests for recovery (NOEXP)
	 * and unlocks can pass.
	 */

	if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
	    !test_bit(LFL_NOBLOCK, &lp->flags) && lp->req != DLM_LOCK_NL) {
		gdlm_queue_delayed(lp);
		return;
	}

	/*
	 * Submit the actual lock request.
	 */

	if (test_bit(LFL_NOBAST, &lp->flags))
		bast = 0;

	make_strname(&lp->lockname, &str);

	set_bit(LFL_ACTIVE, &lp->flags);

	log_debug("lk %x,%"PRIx64" id %x %d,%d %x", lp->lockname.ln_type,
		  lp->lockname.ln_number, lp->lksb.sb_lkid,
		  lp->cur, lp->req, lp->lkf);

	error = dlm_lock(ls->dlm_lockspace, lp->req, &lp->lksb, lp->lkf,
			 str.name, str.namelen, 0, gdlm_ast, (void *) lp,
			 bast ? gdlm_bast : NULL, range);

	if ((error == -EAGAIN) && (lp->lkf & DLM_LKF_NOQUEUE)) {
		lp->lksb.sb_status = -EAGAIN;
		queue_complete(lp);
		error = 0;
	}

	GDLM_ASSERT(!error,
		    printk("%s: num=%x,%"PRIx64" err=%d cur=%d req=%d lkf=%x\n",
			   ls->fsname, lp->lockname.ln_type,
			   lp->lockname.ln_number, error, lp->cur, lp->req,
			   lp->lkf););
}

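/* Submit an unlock to the dlm; the completion is delivered through the
   lock's ast like any other result. */
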
void gdlm_do_unlock(struct gdlm_lock *lp)
{
	unsigned int lkf = 0;
	int error;

	set_bit(LFL_DLM_UNLOCK, &lp->flags);
	set_bit(LFL_ACTIVE, &lp->flags);

	if (lp->lvb)
		lkf = DLM_LKF_VALBLK;

	log_debug("un %x,%"PRIx64" %x %d %x", lp->lockname.ln_type,
		  lp->lockname.ln_number, lp->lksb.sb_lkid, lp->cur, lkf);

	error = dlm_unlock(lp->ls->dlm_lockspace, lp->lksb.sb_lkid, lkf,
			   NULL, lp);

	GDLM_ASSERT(!error,
		    printk("%s: error=%d num=%x,%"PRIx64" lkf=%x flags=%lx\n",
			   lp->ls->fsname, error, lp->lockname.ln_type,
			   lp->lockname.ln_number, lkf, lp->flags););
}

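/* lm_lockops: request a lock state change.  The result is reported to gfs
   asynchronously, hence LM_OUT_ASYNC. */
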
unsigned int gdlm_lock(lm_lock_t *lock, unsigned int cur_state,
		       unsigned int req_state, unsigned int flags)
{
	struct gdlm_lock *lp = (struct gdlm_lock *) lock;

	clear_bit(LFL_DLM_CANCEL, &lp->flags);
	if (flags & LM_FLAG_NOEXP)
		set_bit(LFL_NOBLOCK, &lp->flags);

	check_cur_state(lp, cur_state);
	lp->req = make_mode(req_state);
	lp->lkf = make_flags(lp, flags, lp->cur, lp->req);

	gdlm_do_lock(lp, NULL);
	return LM_OUT_ASYNC;
}

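/* lm_lockops: release a lock.  A lock gfs never acquired (cur still
   DLM_LOCK_IV) needs no dlm request. */
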
unsigned int gdlm_unlock(lm_lock_t *lock, unsigned int cur_state)
{
	struct gdlm_lock *lp = (struct gdlm_lock *) lock;

	clear_bit(LFL_DLM_CANCEL, &lp->flags);
	if (lp->cur == DLM_LOCK_IV)
		return 0;
	gdlm_do_unlock(lp);
	return LM_OUT_ASYNC;
}

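/* lm_lockops: cancel an outstanding request.  A request still sitting on
   the delayed list can be completed as cancelled directly; one blocked
   inside the dlm needs a DLM_LKF_CANCEL unlock. */
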
void gdlm_cancel(lm_lock_t *lock)
{
	struct gdlm_lock *lp = (struct gdlm_lock *) lock;
	struct gdlm_ls *ls = lp->ls;
	int error, delay_list = 0;

	if (test_bit(LFL_DLM_CANCEL, &lp->flags))
		return;

	log_all("gdlm_cancel %x,%"PRIx64" flags %lx",
		lp->lockname.ln_type, lp->lockname.ln_number, lp->flags);

	spin_lock(&ls->async_lock);
	if (!list_empty(&lp->delay_list)) {
		list_del_init(&lp->delay_list);
		delay_list = 1;
	}
	spin_unlock(&ls->async_lock);

	if (delay_list) {
		set_bit(LFL_CANCEL, &lp->flags);
		set_bit(LFL_ACTIVE, &lp->flags);
		queue_complete(lp);
		return;
	}

	if (!test_bit(LFL_ACTIVE, &lp->flags) ||
	    test_bit(LFL_DLM_UNLOCK, &lp->flags)) {
		log_all("gdlm_cancel skip %x,%"PRIx64" flags %lx",
			lp->lockname.ln_type, lp->lockname.ln_number,
			lp->flags);
		return;
	}

	/* the lock is blocked in the dlm */

	set_bit(LFL_DLM_CANCEL, &lp->flags);
	set_bit(LFL_ACTIVE, &lp->flags);

	error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, DLM_LKF_CANCEL,
			   NULL, lp);

	log_all("gdlm_cancel rv %d %x,%"PRIx64" flags %lx", error,
		lp->lockname.ln_type, lp->lockname.ln_number, lp->flags);

	if (error == -EBUSY)
		clear_bit(LFL_DLM_CANCEL, &lp->flags);
}

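/* Allocate a zeroed lock value block and attach it to the lock. */
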
int gdlm_add_lvb(struct gdlm_lock *lp)
{
	char *lvb;

	lvb = kzalloc(GDLM_LVB_SIZE, GFP_KERNEL);
	if (!lvb)
		return -ENOMEM;

	lp->lksb.sb_lvbptr = lvb;
	lp->lvb = lvb;
	return 0;
}

void gdlm_del_lvb(struct gdlm_lock *lp)
{
	kfree(lp->lvb);
	lp->lvb = NULL;
	lp->lksb.sb_lvbptr = NULL;
}

/* This can do a synchronous dlm request (requiring a lock_dlm thread to get
   the completion) because gfs won't call hold_lvb() during a callback (from
   the context of a lock_dlm thread). */

static int hold_null_lock(struct gdlm_lock *lp)
{
	struct gdlm_lock *lpn = NULL;
	int error;

	if (lp->hold_null) {
		printk("lock_dlm: lvb already held\n");
		return 0;
	}

	error = gdlm_create_lp(lp->ls, &lp->lockname, &lpn);
	if (error)
		goto out;

	lpn->lksb.sb_lvbptr = junk_lvb;
	lpn->lvb = junk_lvb;

	lpn->req = DLM_LOCK_NL;
	lpn->lkf = DLM_LKF_VALBLK | DLM_LKF_EXPEDITE;
	set_bit(LFL_NOBAST, &lpn->flags);
	set_bit(LFL_INLOCK, &lpn->flags);

	init_completion(&lpn->ast_wait);
	gdlm_do_lock(lpn, NULL);
	wait_for_completion(&lpn->ast_wait);
	error = lpn->lksb.sb_status;
	if (error) {
		printk("lock_dlm: hold_null_lock dlm error %d\n", error);
		gdlm_delete_lp(lpn);
		lpn = NULL;
	}
out:
	lp->hold_null = lpn;
	return error;
}

/* This cannot do a synchronous dlm request (requiring a lock_dlm thread to
   get the completion) because gfs may call unhold_lvb() during a callback
   (from the context of a lock_dlm thread) which could cause a deadlock since
   the other lock_dlm thread could be engaged in recovery. */

static void unhold_null_lock(struct gdlm_lock *lp)
{
	struct gdlm_lock *lpn = lp->hold_null;

	GDLM_ASSERT(lpn,);
	lpn->lksb.sb_lvbptr = NULL;
	lpn->lvb = NULL;
	set_bit(LFL_UNLOCK_DELETE, &lpn->flags);
	gdlm_do_unlock(lpn);
	lp->hold_null = NULL;
}

/* Acquire an NL lock because gfs requires the value block to remain intact
   on the resource while the lvb is "held", even if gfs itself holds no locks
   on the resource. */

int gdlm_hold_lvb(lm_lock_t *lock, char **lvbp)
{
	struct gdlm_lock *lp = (struct gdlm_lock *) lock;
	int error;

	error = gdlm_add_lvb(lp);
	if (error)
		return error;

	*lvbp = lp->lvb;

	error = hold_null_lock(lp);
	if (error)
		gdlm_del_lvb(lp);

	return error;
}

void gdlm_unhold_lvb(lm_lock_t *lock, char *lvb)
{
	struct gdlm_lock *lp = (struct gdlm_lock *) lock;

	unhold_null_lock(lp);
	gdlm_del_lvb(lp);
}

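/* With the lock held EX, re-request EX with the lvb attached so the dlm
   updates the resource's lvb from ours.  LFL_SYNC_LVB marks the request as
   an lvb sync rather than a normal state change for the completion path. */
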
void gdlm_sync_lvb(lm_lock_t *lock, char *lvb)
{
	struct gdlm_lock *lp = (struct gdlm_lock *) lock;

	if (lp->cur != DLM_LOCK_EX)
		return;

	init_completion(&lp->ast_wait);
	set_bit(LFL_SYNC_LVB, &lp->flags);

	lp->req = DLM_LOCK_EX;
	lp->lkf = make_flags(lp, 0, lp->cur, lp->req);

	gdlm_do_lock(lp, NULL);
	wait_for_completion(&lp->ast_wait);
}

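/* Move every delayed lock onto the submit list and wake the lock_dlm
   thread to resubmit them; used once DFL_BLOCK_LOCKS is cleared after
   recovery. */
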
void gdlm_submit_delayed(struct gdlm_ls *ls)
{
	struct gdlm_lock *lp, *safe;

	spin_lock(&ls->async_lock);
	list_for_each_entry_safe(lp, safe, &ls->delayed, delay_list) {
		list_del_init(&lp->delay_list);
		list_add_tail(&lp->delay_list, &ls->submit);
	}
	spin_unlock(&ls->async_lock);
	wake_up(&ls->thread_wait);
}

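/* Free every remaining lock in the lockspace without unlocking it in the
   dlm; returns the number of locks freed. */
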
int gdlm_release_all_locks(struct gdlm_ls *ls)
{
	struct gdlm_lock *lp, *safe;
	int count = 0;

	spin_lock(&ls->async_lock);
	list_for_each_entry_safe(lp, safe, &ls->all_locks, all_list) {
		list_del_init(&lp->all_list);

		if (lp->lvb && lp->lvb != junk_lvb)
			kfree(lp->lvb);
		kfree(lp);
		count++;
	}
	spin_unlock(&ls->async_lock);

	return count;
}