/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmthread.c
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD)
#include "cluster/masklog.h"

static int dlm_thread(void *data);
static void dlm_flush_asts(struct dlm_ctxt *dlm);

#define dlm_lock_is_remote(dlm, lock)     ((lock)->ml.node != (dlm)->node_num)

/* Waits until the given flags are cleared in res->state.  Exits holding
 * res->spinlock, but may drop and retake it while waiting. */
void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags)
{
	DECLARE_WAITQUEUE(wait, current);

	assert_spin_locked(&res->spinlock);

	add_wait_queue(&res->wq, &wait);
repeat:
	set_current_state(TASK_UNINTERRUPTIBLE);
	if (res->state & flags) {
		spin_unlock(&res->spinlock);
		schedule();
		spin_lock(&res->spinlock);
		goto repeat;
	}
	remove_wait_queue(&res->wq, &wait);
	__set_current_state(TASK_RUNNING);
}
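
/*
 * Illustrative sketch (not a call site in this file): a caller that must
 * not observe a lockres mid-recovery could wait the flag out while
 * holding the spinlock, e.g.:
 *
 *	spin_lock(&res->spinlock);
 *	__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_RECOVERING);
 *	... inspect or modify res, still under res->spinlock ...
 *	spin_unlock(&res->spinlock);
 *
 * Since the spinlock may have been dropped and retaken inside the call,
 * any state sampled before it must be revalidated afterwards.
 */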

int __dlm_lockres_has_locks(struct dlm_lock_resource *res)
{
	if (list_empty(&res->granted) &&
	    list_empty(&res->converting) &&
	    list_empty(&res->blocked))
		return 0;
	return 1;
}

/* "unused": the lockres has no locks, is not on the dirty list,
 * has no inflight locks (in the gap between mastery and acquiring
 * the first lock), and has no bits set in its refmap.
 * It is then truly ready to be freed. */
int __dlm_lockres_unused(struct dlm_lock_resource *res)
{
	int bit;

	if (__dlm_lockres_has_locks(res))
		return 0;

	if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
		return 0;

	if (res->state & DLM_LOCK_RES_RECOVERING)
		return 0;

	bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
	if (bit < O2NM_MAX_NODES)
		return 0;

	/*
	 * since the bit for dlm->node_num is not set, inflight_locks better
	 * be zero
	 */
	BUG_ON(res->inflight_locks != 0);
	return 1;
}
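
/*
 * The refmap test above leans on find_next_bit() returning the bitmap
 * size when no bit is set.  A minimal sketch of the same idiom (purely
 * illustrative; the real refmap bits are managed by the mastery code):
 *
 *	DECLARE_BITMAP(map, O2NM_MAX_NODES);
 *
 *	bitmap_zero(map, O2NM_MAX_NODES);
 *	if (find_next_bit(map, O2NM_MAX_NODES, 0) >= O2NM_MAX_NODES)
 *		... empty: no node holds a reference ...
 */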

/* Call whenever you may have added or deleted something from one of
 * the lockres queues.  This will figure out whether the lockres belongs
 * on the unused list or not and does the appropriate thing. */
void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res)
{
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (__dlm_lockres_unused(res)) {
		if (list_empty(&res->purge)) {
			mlog(0, "%s: Adding res %.*s to purge list\n",
			     dlm->name, res->lockname.len, res->lockname.name);

			res->last_used = jiffies;
			dlm_lockres_get(res);
			list_add_tail(&res->purge, &dlm->purge_list);
			dlm->purge_count++;
		}
	} else if (!list_empty(&res->purge)) {
		mlog(0, "%s: Removing res %.*s from purge list\n",
		     dlm->name, res->lockname.len, res->lockname.name);

		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}
}

void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			    struct dlm_lock_resource *res)
{
	spin_lock(&dlm->spinlock);
	spin_lock(&res->spinlock);

	__dlm_lockres_calc_usage(dlm, res);

	spin_unlock(&res->spinlock);
	spin_unlock(&dlm->spinlock);
}
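
/*
 * Sketch of a typical call site (hypothetical; the actual callers live
 * in dlmlock.c and friends): after unlinking a lock from one of the
 * queues, recompute whether the lockres is purgeable:
 *
 *	list_del_init(&lock->list);
 *	dlm_lockres_calc_usage(dlm, res);
 *
 * The locked wrapper takes dlm->spinlock before res->spinlock, matching
 * the lock ordering used throughout this file.
 */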

static void dlm_purge_lockres(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res)
{
	int master;
	int ret = 0;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	master = (res->owner == dlm->node_num);

	mlog(0, "%s: Purging res %.*s, master %d\n", dlm->name,
	     res->lockname.len, res->lockname.name, master);

	if (!master) {
		res->state |= DLM_LOCK_RES_DROPPING_REF;
		/* drop spinlock...  retake below */
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);

		spin_lock(&res->spinlock);
		/* This ensures that clear refmap is sent after the set */
		__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
		spin_unlock(&res->spinlock);

		/* clear our bit from the master's refmap, ignore errors */
		ret = dlm_drop_lockres_ref(dlm, res);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: deref %.*s failed %d\n", dlm->name,
			     res->lockname.len, res->lockname.name, ret);
			if (!dlm_is_host_down(ret))
				BUG();
		}
		spin_lock(&dlm->spinlock);
		spin_lock(&res->spinlock);
	}

	if (!list_empty(&res->purge)) {
		mlog(0, "%s: Removing res %.*s from purgelist, master %d\n",
		     dlm->name, res->lockname.len, res->lockname.name, master);
		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}

	if (!__dlm_lockres_unused(res)) {
		mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		__dlm_print_one_lock_resource(res);
		BUG();
	}

	__dlm_unhash_lockres(res);

	/* lockres is not in the hash now.  drop the flag and wake up
	 * any processes waiting in dlm_get_lock_resource. */
	if (!master) {
		res->state &= ~DLM_LOCK_RES_DROPPING_REF;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
	} else
		spin_unlock(&res->spinlock);
}

static void dlm_run_purge_list(struct dlm_ctxt *dlm,
			       int purge_now)
{
	unsigned int run_max, unused;
	unsigned long purge_jiffies;
	struct dlm_lock_resource *lockres;

	spin_lock(&dlm->spinlock);
	run_max = dlm->purge_count;

	while (run_max && !list_empty(&dlm->purge_list)) {
		run_max--;

		lockres = list_entry(dlm->purge_list.next,
				     struct dlm_lock_resource, purge);

		spin_lock(&lockres->spinlock);

		purge_jiffies = lockres->last_used +
			msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);

		/* Make sure that we want to be processing this lockres
		 * at this time. */
		if (!purge_now && time_after(purge_jiffies, jiffies)) {
			/* Since resources are added to the purge list
			 * in tail order, we can stop at the first
			 * unpurgeable resource -- anything added after
			 * it will have a greater last_used value */
			spin_unlock(&lockres->spinlock);
			break;
		}

		/* Status of the lockres *might* change so double
		 * check. If the lockres is unused, holding the dlm
		 * spinlock will prevent people from getting any more
		 * refs on it. */
		unused = __dlm_lockres_unused(lockres);
		if (!unused ||
		    (lockres->state & DLM_LOCK_RES_MIGRATING)) {
			mlog(0, "%s: res %.*s is in use or being remastered, "
			     "used %d, state %d\n", dlm->name,
			     lockres->lockname.len, lockres->lockname.name,
			     !unused, lockres->state);
			/* move the lockres to the list tail and retry
			 * later (the original had these two arguments
			 * reversed, which corrupted the purge list) */
			list_move_tail(&lockres->purge, &dlm->purge_list);
			spin_unlock(&lockres->spinlock);
			continue;
		}

		dlm_lockres_get(lockres);

		dlm_purge_lockres(dlm, lockres);

		dlm_lockres_put(lockres);

		/* Avoid adding any scheduling latencies */
		cond_resched_lock(&dlm->spinlock);
	}

	spin_unlock(&dlm->spinlock);
}
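
/*
 * The expiry test above uses time_after() rather than a plain compare so
 * that jiffies wraparound is handled correctly.  A minimal sketch of the
 * same idiom:
 *
 *	unsigned long deadline = lockres->last_used +
 *				 msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);
 *
 *	if (time_after(jiffies, deadline))
 *		... idle long enough, safe to purge ...
 *
 * Because lockreses are appended in last_used order, the scan can stop
 * at the first entry that has not yet expired.
 */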

static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res)
{
	struct dlm_lock *lock, *target;
	struct list_head *iter;
	struct list_head *head;
	int can_grant = 1;

	/*
	 * Because this function is called with the lockres spinlock held,
	 * and because we know that it is not migrating/recovering/
	 * in-progress, it is fine to reserve asts and basts right before
	 * queueing them all throughout.
	 */
	assert_spin_locked(&dlm->ast_lock);
	assert_spin_locked(&res->spinlock);
	BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|
			      DLM_LOCK_RES_RECOVERING|
			      DLM_LOCK_RES_IN_PROGRESS)));

converting:
	if (list_empty(&res->converting))
		goto blocked;
	mlog(0, "%s: res %.*s has locks on the convert queue\n", dlm->name,
	     res->lockname.len, res->lockname.name);

	target = list_entry(res->converting.next, struct dlm_lock, list);
	if (target->ml.convert_type == LKM_IVMODE) {
		mlog(ML_ERROR, "%s: res %.*s converting lock to invalid mode\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		BUG();
	}
	head = &res->granted;
	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type,
					 target->ml.convert_type)) {
			can_grant = 0;
			/* queue the BAST if not already */
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				__dlm_queue_bast(dlm, lock);
			}
			/* update the highest_blocked if needed */
			if (lock->ml.highest_blocked < target->ml.convert_type)
				lock->ml.highest_blocked =
					target->ml.convert_type;
		}
	}
	head = &res->converting;
	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type,
					 target->ml.convert_type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				__dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.convert_type)
				lock->ml.highest_blocked =
					target->ml.convert_type;
		}
	}

	/* we can convert the lock */
	if (can_grant) {
		spin_lock(&target->spinlock);
		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);

		mlog(0, "%s: res %.*s, AST for Converting lock %u:%llu, type "
		     "%d => %d, node %u\n", dlm->name, res->lockname.len,
		     res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
		     target->ml.type,
		     target->ml.convert_type, target->ml.node);

		target->ml.type = target->ml.convert_type;
		target->ml.convert_type = LKM_IVMODE;
		list_move_tail(&target->list, &res->granted);

		BUG_ON(!target->lksb);
		target->lksb->status = DLM_NORMAL;

		spin_unlock(&target->spinlock);

		__dlm_lockres_reserve_ast(res);
		__dlm_queue_ast(dlm, target);
		/* go back and check for more */
		goto converting;
	}

blocked:
	if (list_empty(&res->blocked))
		goto leave;
	target = list_entry(res->blocked.next, struct dlm_lock, list);

	head = &res->granted;
	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				__dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.type)
				lock->ml.highest_blocked = target->ml.type;
		}
	}

	head = &res->converting;
	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				__dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.type)
				lock->ml.highest_blocked = target->ml.type;
		}
	}

	/* we can grant the blocked lock (only
	 * possible if converting list empty) */
	if (can_grant) {
		spin_lock(&target->spinlock);
		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);

		mlog(0, "%s: res %.*s, AST for Blocked lock %u:%llu, type %d, "
		     "node %u\n", dlm->name, res->lockname.len,
		     res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
		     target->ml.type, target->ml.node);

		/* target->ml.type is already correct */
		list_move_tail(&target->list, &res->granted);

		BUG_ON(!target->lksb);
		target->lksb->status = DLM_NORMAL;

		spin_unlock(&target->spinlock);

		__dlm_lockres_reserve_ast(res);
		__dlm_queue_ast(dlm, target);
		/* go back and check for more */
		goto converting;
	}

leave:
	return;
}
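
/*
 * dlm_lock_compatible() (from dlmcommon.h) decides whether an already
 * granted mode can coexist with a requested one.  o2dlm effectively
 * uses three modes, giving the classic matrix:
 *
 *		NL	PR	EX
 *	NL	yes	yes	yes
 *	PR	yes	yes	no
 *	EX	yes	no	no
 *
 * A sketch of a check with that shape (illustrative, not the actual
 * implementation):
 *
 *	static inline int mode_compatible(int existing, int request)
 *	{
 *		if (existing == LKM_NLMODE || request == LKM_NLMODE)
 *			return 1;	NL coexists with everything
 *		if (existing == LKM_EXMODE || request == LKM_EXMODE)
 *			return 0;	EX excludes all but NL
 *		return 1;		PR + PR is the remaining case
 *	}
 */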

/* must have NO locks when calling this with res != NULL */
void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	if (res) {
		spin_lock(&dlm->spinlock);
		spin_lock(&res->spinlock);
		__dlm_dirty_lockres(dlm, res);
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
	}
	wake_up(&dlm->dlm_thread_wq);
}
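
/*
 * Illustrative call site (hypothetical helper name; the real callers
 * are spread across dlmlock.c, dlmconvert.c and dlmunlock.c): after
 * changing a lockres's queues, poke the thread so ASTs go out promptly:
 *
 *	attach_new_lock(dlm, res, lock);
 *	dlm_kick_thread(dlm, res);
 *
 * Passing a NULL res just wakes the thread without dirtying anything.
 */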

void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	/* don't shuffle secondary queues */
	if (res->owner == dlm->node_num) {
		if (res->state & (DLM_LOCK_RES_MIGRATING |
				  DLM_LOCK_RES_BLOCK_DIRTY))
			return;

		if (list_empty(&res->dirty)) {
			/* ref for dirty_list */
			dlm_lockres_get(res);
			list_add_tail(&res->dirty, &dlm->dirty_list);
			res->state |= DLM_LOCK_RES_DIRTY;
		}
	}

	mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
	     res->lockname.name);
}


/* Launch the NM thread for the mounted volume */
int dlm_launch_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "Starting dlm_thread...\n");

	dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm_thread");
	if (IS_ERR(dlm->dlm_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_thread_task));
		dlm->dlm_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}

void dlm_complete_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_thread_task) {
		mlog(ML_KTHREAD, "Waiting for dlm thread to exit\n");
		kthread_stop(dlm->dlm_thread_task);
		dlm->dlm_thread_task = NULL;
	}
}

static int dlm_dirty_list_empty(struct dlm_ctxt *dlm)
{
	int empty;

	spin_lock(&dlm->spinlock);
	empty = list_empty(&dlm->dirty_list);
	spin_unlock(&dlm->spinlock);

	return empty;
}

static void dlm_flush_asts(struct dlm_ctxt *dlm)
{
	int ret;
	struct dlm_lock *lock;
	struct dlm_lock_resource *res;
	u8 hi;

	spin_lock(&dlm->ast_lock);
	while (!list_empty(&dlm->pending_asts)) {
		lock = list_entry(dlm->pending_asts.next,
				  struct dlm_lock, ast_list);
		/* get an extra ref on lock */
		dlm_lock_get(lock);
		res = lock->lockres;
		mlog(0, "%s: res %.*s, Flush AST for lock %u:%llu, type %d, "
		     "node %u\n", dlm->name, res->lockname.len,
		     res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
		     lock->ml.type, lock->ml.node);

		BUG_ON(!lock->ast_pending);

		/* remove from list (including ref) */
		list_del_init(&lock->ast_list);
		dlm_lock_put(lock);
		spin_unlock(&dlm->ast_lock);

		if (lock->ml.node != dlm->node_num) {
			ret = dlm_do_remote_ast(dlm, res, lock);
			if (ret < 0)
				mlog_errno(ret);
		} else
			dlm_do_local_ast(dlm, res, lock);

		spin_lock(&dlm->ast_lock);

		/* possible that another ast was queued while
		 * we were delivering the last one */
		if (!list_empty(&lock->ast_list)) {
			mlog(0, "%s: res %.*s, AST queued while flushing last "
			     "one\n", dlm->name, res->lockname.len,
			     res->lockname.name);
		} else
			lock->ast_pending = 0;

		/* drop the extra ref.
		 * this may drop it completely. */
		dlm_lock_put(lock);
		dlm_lockres_release_ast(dlm, res);
	}

	while (!list_empty(&dlm->pending_basts)) {
		lock = list_entry(dlm->pending_basts.next,
				  struct dlm_lock, bast_list);
		/* get an extra ref on lock */
		dlm_lock_get(lock);
		res = lock->lockres;

		BUG_ON(!lock->bast_pending);

		/* get the highest blocked lock, and reset */
		spin_lock(&lock->spinlock);
		BUG_ON(lock->ml.highest_blocked <= LKM_IVMODE);
		hi = lock->ml.highest_blocked;
		lock->ml.highest_blocked = LKM_IVMODE;
		spin_unlock(&lock->spinlock);

		/* remove from list (including ref) */
		list_del_init(&lock->bast_list);
		dlm_lock_put(lock);
		spin_unlock(&dlm->ast_lock);

		mlog(0, "%s: res %.*s, Flush BAST for lock %u:%llu, "
		     "blocked %d, node %u\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
		     hi, lock->ml.node);

		if (lock->ml.node != dlm->node_num) {
			ret = dlm_send_proxy_bast(dlm, res, lock, hi);
			if (ret < 0)
				mlog_errno(ret);
		} else
			dlm_do_local_bast(dlm, res, lock, hi);

		spin_lock(&dlm->ast_lock);

		/* possible that another bast was queued while
		 * we were delivering the last one */
		if (!list_empty(&lock->bast_list)) {
			mlog(0, "%s: res %.*s, BAST queued while flushing last "
			     "one\n", dlm->name, res->lockname.len,
			     res->lockname.name);
		} else
			lock->bast_pending = 0;

		/* drop the extra ref.
		 * this may drop it completely. */
		dlm_lock_put(lock);
		dlm_lockres_release_ast(dlm, res);
	}
	wake_up(&dlm->ast_wq);
	spin_unlock(&dlm->ast_lock);
}
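
/*
 * Note the pairing discipline above: every __dlm_queue_ast() or
 * __dlm_queue_bast() in dlm_shuffle_lists() was preceded by
 * __dlm_lockres_reserve_ast(), and each delivery here ends in
 * dlm_lockres_release_ast().  The producer side, in the same locking
 * context dlm_shuffle_lists() runs under, looks like:
 *
 *	__dlm_lockres_reserve_ast(res);		take a delivery ref
 *	__dlm_queue_ast(dlm, lock);		handled by dlm_flush_asts()
 *
 * The reserve/release pair keeps the lockres marked busy until every
 * queued notification has actually been delivered.
 */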


#define DLM_THREAD_TIMEOUT_MS (4 * 1000)
#define DLM_THREAD_MAX_DIRTY  100
#define DLM_THREAD_MAX_ASTS   10

static int dlm_thread(void *data)
{
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		int n = DLM_THREAD_MAX_DIRTY;

		/* dlm_shutting_down is very point-in-time, but that
		 * doesn't matter as we'll just loop back around if we
		 * get false on the leading edge of a state
		 * transition. */
		dlm_run_purge_list(dlm, dlm_shutting_down(dlm));

		/* We really don't want to hold dlm->spinlock while
		 * calling dlm_shuffle_lists on each lockres that
		 * needs to have its queues adjusted and AST/BASTs
		 * run.  So let's pull each entry off the dirty_list
		 * and drop dlm->spinlock ASAP.  Once off the list,
		 * res->spinlock needs to be taken again to protect
		 * the queues while calling dlm_shuffle_lists.  */
		spin_lock(&dlm->spinlock);
		while (!list_empty(&dlm->dirty_list)) {
			int delay = 0;
			res = list_entry(dlm->dirty_list.next,
					 struct dlm_lock_resource, dirty);

			/* peel a lockres off, remove it from the list,
			 * unset the dirty flag and drop the dlm lock */
			BUG_ON(!res);
			dlm_lockres_get(res);

			spin_lock(&res->spinlock);
			/* We clear the DLM_LOCK_RES_DIRTY state once we
			 * shuffle lists below */
			list_del_init(&res->dirty);
			spin_unlock(&res->spinlock);
			spin_unlock(&dlm->spinlock);
			/* Drop dirty_list ref */
			dlm_lockres_put(res);

			/* lockres can be re-dirtied/re-added to the
			 * dirty_list in this gap, but that is ok */

			spin_lock(&dlm->ast_lock);
			spin_lock(&res->spinlock);
			if (res->owner != dlm->node_num) {
				__dlm_print_one_lock_resource(res);
				mlog(ML_ERROR, "%s: inprog %d, mig %d, reco %d,"
				     " dirty %d\n", dlm->name,
				     !!(res->state & DLM_LOCK_RES_IN_PROGRESS),
				     !!(res->state & DLM_LOCK_RES_MIGRATING),
				     !!(res->state & DLM_LOCK_RES_RECOVERING),
				     !!(res->state & DLM_LOCK_RES_DIRTY));
			}
			BUG_ON(res->owner != dlm->node_num);

			/* it is now ok to move lockreses in these states
			 * to the dirty list, assuming that they will only be
			 * dirty for a short while. */
			BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
			if (res->state & (DLM_LOCK_RES_IN_PROGRESS |
					  DLM_LOCK_RES_RECOVERING)) {
				/* move it to the tail and keep going */
				res->state &= ~DLM_LOCK_RES_DIRTY;
				spin_unlock(&res->spinlock);
				spin_unlock(&dlm->ast_lock);
				mlog(0, "%s: res %.*s, inprogress, delay list "
				     "shuffle, state %d\n", dlm->name,
				     res->lockname.len, res->lockname.name,
				     res->state);
				delay = 1;
				goto in_progress;
			}

			/* at this point the lockres is not migrating/
			 * recovering/in-progress.  we have the lockres
			 * spinlock and do NOT have the dlm lock.
			 * safe to reserve/queue asts and run the lists. */

			/* called while holding lockres lock */
			dlm_shuffle_lists(dlm, res);
			res->state &= ~DLM_LOCK_RES_DIRTY;
			spin_unlock(&res->spinlock);
			spin_unlock(&dlm->ast_lock);

			dlm_lockres_calc_usage(dlm, res);

in_progress:

			spin_lock(&dlm->spinlock);
			/* if the lock was in-progress, stick
			 * it on the back of the list */
			if (delay) {
				spin_lock(&res->spinlock);
				__dlm_dirty_lockres(dlm, res);
				spin_unlock(&res->spinlock);
			}
			dlm_lockres_put(res);

			/* unlikely, but we may need to give time to
			 * other tasks */
			if (!--n) {
				mlog(0, "%s: Throttling dlm thread\n",
				     dlm->name);
				break;
			}
		}

		spin_unlock(&dlm->spinlock);
		dlm_flush_asts(dlm);

		/* yield and continue right away if there is more work to do */
		if (!n) {
			cond_resched();
			continue;
		}

		wait_event_interruptible_timeout(dlm->dlm_thread_wq,
						 !dlm_dirty_list_empty(dlm) ||
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM thread\n");
	return 0;
}