| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1 | /* -*- mode: c; c-basic-offset: 8; -*- | 
|  | 2 | * vim: noexpandtab sw=8 ts=8 sts=0: | 
|  | 3 | * | 
|  | 4 | * dlmrecovery.c | 
|  | 5 | * | 
|  | 6 | * recovery stuff | 
|  | 7 | * | 
|  | 8 | * Copyright (C) 2004 Oracle.  All rights reserved. | 
|  | 9 | * | 
|  | 10 | * This program is free software; you can redistribute it and/or | 
|  | 11 | * modify it under the terms of the GNU General Public | 
|  | 12 | * License as published by the Free Software Foundation; either | 
|  | 13 | * version 2 of the License, or (at your option) any later version. | 
|  | 14 | * | 
|  | 15 | * This program is distributed in the hope that it will be useful, | 
|  | 16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 
|  | 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU | 
|  | 18 | * General Public License for more details. | 
|  | 19 | * | 
|  | 20 | * You should have received a copy of the GNU General Public | 
|  | 21 | * License along with this program; if not, write to the | 
|  | 22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | 
|  | 23 | * Boston, MA 021110-1307, USA. | 
|  | 24 | * | 
|  | 25 | */ | 
|  | 26 |  | 
|  | 27 |  | 
|  | 28 | #include <linux/module.h> | 
|  | 29 | #include <linux/fs.h> | 
|  | 30 | #include <linux/types.h> | 
|  | 31 | #include <linux/slab.h> | 
|  | 32 | #include <linux/highmem.h> | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 33 | #include <linux/init.h> | 
|  | 34 | #include <linux/sysctl.h> | 
|  | 35 | #include <linux/random.h> | 
|  | 36 | #include <linux/blkdev.h> | 
|  | 37 | #include <linux/socket.h> | 
|  | 38 | #include <linux/inet.h> | 
|  | 39 | #include <linux/timer.h> | 
|  | 40 | #include <linux/kthread.h> | 
| Adrian Bunk | b4c7f53 | 2006-01-14 20:55:10 +0100 | [diff] [blame] | 41 | #include <linux/delay.h> | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 42 |  | 
|  | 43 |  | 
|  | 44 | #include "cluster/heartbeat.h" | 
|  | 45 | #include "cluster/nodemanager.h" | 
|  | 46 | #include "cluster/tcp.h" | 
|  | 47 |  | 
|  | 48 | #include "dlmapi.h" | 
|  | 49 | #include "dlmcommon.h" | 
|  | 50 | #include "dlmdomain.h" | 
|  | 51 |  | 
|  | 52 | #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY) | 
|  | 53 | #include "cluster/masklog.h" | 
|  | 54 |  | 
|  | 55 | static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node); | 
|  | 56 |  | 
|  | 57 | static int dlm_recovery_thread(void *data); | 
|  | 58 | void dlm_complete_recovery_thread(struct dlm_ctxt *dlm); | 
|  | 59 | int dlm_launch_recovery_thread(struct dlm_ctxt *dlm); | 
| Kurt Hackel | c03872f | 2006-03-06 14:08:49 -0800 | [diff] [blame] | 60 | void dlm_kick_recovery_thread(struct dlm_ctxt *dlm); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 61 | static int dlm_do_recovery(struct dlm_ctxt *dlm); | 
|  | 62 |  | 
|  | 63 | static int dlm_pick_recovery_master(struct dlm_ctxt *dlm); | 
|  | 64 | static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node); | 
|  | 65 | static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node); | 
|  | 66 | static int dlm_request_all_locks(struct dlm_ctxt *dlm, | 
|  | 67 | u8 request_from, u8 dead_node); | 
|  | 68 | static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node); | 
|  | 69 |  | 
|  | 70 | static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res); | 
|  | 71 | static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres, | 
|  | 72 | const char *lockname, int namelen, | 
|  | 73 | int total_locks, u64 cookie, | 
|  | 74 | u8 flags, u8 master); | 
|  | 75 | static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm, | 
|  | 76 | struct dlm_migratable_lockres *mres, | 
|  | 77 | u8 send_to, | 
|  | 78 | struct dlm_lock_resource *res, | 
|  | 79 | int total_locks); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 80 | static int dlm_process_recovery_data(struct dlm_ctxt *dlm, | 
|  | 81 | struct dlm_lock_resource *res, | 
|  | 82 | struct dlm_migratable_lockres *mres); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 83 | static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm); | 
|  | 84 | static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, | 
|  | 85 | u8 dead_node, u8 send_to); | 
|  | 86 | static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node); | 
|  | 87 | static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm, | 
|  | 88 | struct list_head *list, u8 dead_node); | 
|  | 89 | static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, | 
|  | 90 | u8 dead_node, u8 new_master); | 
|  | 91 | static void dlm_reco_ast(void *astdata); | 
|  | 92 | static void dlm_reco_bast(void *astdata, int blocked_type); | 
|  | 93 | static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st); | 
|  | 94 | static void dlm_request_all_locks_worker(struct dlm_work_item *item, | 
|  | 95 | void *data); | 
|  | 96 | static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data); | 
| Adrian Bunk | 8169cae | 2006-03-31 16:53:55 +0200 | [diff] [blame] | 97 | static int dlm_lockres_master_requery(struct dlm_ctxt *dlm, | 
|  | 98 | struct dlm_lock_resource *res, | 
|  | 99 | u8 *real_master); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 100 |  | 
|  | 101 | static u64 dlm_get_next_mig_cookie(void); | 
|  | 102 |  | 
| Ingo Molnar | 34af946 | 2006-06-27 02:53:55 -0700 | [diff] [blame] | 103 | static DEFINE_SPINLOCK(dlm_reco_state_lock); | 
|  | 104 | static DEFINE_SPINLOCK(dlm_mig_cookie_lock); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 105 | static u64 dlm_mig_cookie = 1; | 
|  | 106 |  | 
|  | 107 | static u64 dlm_get_next_mig_cookie(void) | 
|  | 108 | { | 
|  | 109 | u64 c; | 
|  | 110 | spin_lock(&dlm_mig_cookie_lock); | 
|  | 111 | c = dlm_mig_cookie; | 
|  | 112 | if (dlm_mig_cookie == (~0ULL)) | 
|  | 113 | dlm_mig_cookie = 1; | 
|  | 114 | else | 
|  | 115 | dlm_mig_cookie++; | 
|  | 116 | spin_unlock(&dlm_mig_cookie_lock); | 
|  | 117 | return c; | 
|  | 118 | } | 
|  | 119 |  | 
| Kurt Hackel | ab27eb6 | 2006-04-27 18:03:49 -0700 | [diff] [blame] | 120 | static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm, | 
|  | 121 | u8 dead_node) | 
|  | 122 | { | 
|  | 123 | assert_spin_locked(&dlm->spinlock); | 
|  | 124 | if (dlm->reco.dead_node != dead_node) | 
|  | 125 | mlog(0, "%s: changing dead_node from %u to %u\n", | 
|  | 126 | dlm->name, dlm->reco.dead_node, dead_node); | 
|  | 127 | dlm->reco.dead_node = dead_node; | 
|  | 128 | } | 
|  | 129 |  | 
|  | 130 | static inline void dlm_set_reco_master(struct dlm_ctxt *dlm, | 
|  | 131 | u8 master) | 
|  | 132 | { | 
|  | 133 | assert_spin_locked(&dlm->spinlock); | 
|  | 134 | mlog(0, "%s: changing new_master from %u to %u\n", | 
|  | 135 | dlm->name, dlm->reco.new_master, master); | 
|  | 136 | dlm->reco.new_master = master; | 
|  | 137 | } | 
|  | 138 |  | 
| Kurt Hackel | 466d1a4 | 2006-05-01 11:11:13 -0700 | [diff] [blame] | 139 | static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm) | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 140 | { | 
| Kurt Hackel | 466d1a4 | 2006-05-01 11:11:13 -0700 | [diff] [blame] | 141 | assert_spin_locked(&dlm->spinlock); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 142 | clear_bit(dlm->reco.dead_node, dlm->recovery_map); | 
| Kurt Hackel | ab27eb6 | 2006-04-27 18:03:49 -0700 | [diff] [blame] | 143 | dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM); | 
|  | 144 | dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM); | 
| Kurt Hackel | 466d1a4 | 2006-05-01 11:11:13 -0700 | [diff] [blame] | 145 | } | 
|  | 146 |  | 
|  | 147 | static inline void dlm_reset_recovery(struct dlm_ctxt *dlm) | 
|  | 148 | { | 
|  | 149 | spin_lock(&dlm->spinlock); | 
|  | 150 | __dlm_reset_recovery(dlm); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 151 | spin_unlock(&dlm->spinlock); | 
|  | 152 | } | 
|  | 153 |  | 
|  | 154 | /* Worker function used during recovery. */ | 
| David Howells | c402895 | 2006-11-22 14:57:56 +0000 | [diff] [blame] | 155 | void dlm_dispatch_work(struct work_struct *work) | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 156 | { | 
| David Howells | c402895 | 2006-11-22 14:57:56 +0000 | [diff] [blame] | 157 | struct dlm_ctxt *dlm = | 
|  | 158 | container_of(work, struct dlm_ctxt, dispatched_work); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 159 | LIST_HEAD(tmp_list); | 
| Christoph Hellwig | 800deef | 2007-05-17 16:03:13 +0200 | [diff] [blame] | 160 | struct dlm_work_item *item, *next; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 161 | dlm_workfunc_t *workfunc; | 
| Kurt Hackel | 3156d26 | 2006-05-01 14:39:29 -0700 | [diff] [blame] | 162 | int tot=0; | 
|  | 163 |  | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 164 | spin_lock(&dlm->work_lock); | 
|  | 165 | list_splice_init(&dlm->work_list, &tmp_list); | 
|  | 166 | spin_unlock(&dlm->work_lock); | 
|  | 167 |  | 
| Christoph Hellwig | 800deef | 2007-05-17 16:03:13 +0200 | [diff] [blame] | 168 | list_for_each_entry(item, &tmp_list, list) { | 
| Kurt Hackel | 3156d26 | 2006-05-01 14:39:29 -0700 | [diff] [blame] | 169 | tot++; | 
|  | 170 | } | 
|  | 171 | mlog(0, "%s: work thread has %d work items\n", dlm->name, tot); | 
|  | 172 |  | 
| Christoph Hellwig | 800deef | 2007-05-17 16:03:13 +0200 | [diff] [blame] | 173 | list_for_each_entry_safe(item, next, &tmp_list, list) { | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 174 | workfunc = item->func; | 
|  | 175 | list_del_init(&item->list); | 
|  | 176 |  | 
|  | 177 | /* already have ref on dlm to avoid having | 
|  | 178 | * it disappear.  just double-check. */ | 
|  | 179 | BUG_ON(item->dlm != dlm); | 
|  | 180 |  | 
|  | 181 | /* this is allowed to sleep and | 
|  | 182 | * call network stuff */ | 
|  | 183 | workfunc(item, item->data); | 
|  | 184 |  | 
|  | 185 | dlm_put(dlm); | 
|  | 186 | kfree(item); | 
|  | 187 | } | 
|  | 188 | } | 
|  | 189 |  | 
|  | 190 | /* | 
|  | 191 | * RECOVERY THREAD | 
|  | 192 | */ | 
|  | 193 |  | 
| Kurt Hackel | c03872f | 2006-03-06 14:08:49 -0800 | [diff] [blame] | 194 | void dlm_kick_recovery_thread(struct dlm_ctxt *dlm) | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 195 | { | 
|  | 196 | /* wake the recovery thread | 
|  | 197 | * this will wake the reco thread in one of three places | 
|  | 198 | * 1) sleeping with no recovery happening | 
|  | 199 | * 2) sleeping with recovery mastered elsewhere | 
|  | 200 | * 3) recovery mastered here, waiting on reco data */ | 
|  | 201 |  | 
|  | 202 | wake_up(&dlm->dlm_reco_thread_wq); | 
|  | 203 | } | 
|  | 204 |  | 
|  | 205 | /* Launch the recovery thread */ | 
|  | 206 | int dlm_launch_recovery_thread(struct dlm_ctxt *dlm) | 
|  | 207 | { | 
|  | 208 | mlog(0, "starting dlm recovery thread...\n"); | 
|  | 209 |  | 
|  | 210 | dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm, | 
|  | 211 | "dlm_reco_thread"); | 
|  | 212 | if (IS_ERR(dlm->dlm_reco_thread_task)) { | 
|  | 213 | mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task)); | 
|  | 214 | dlm->dlm_reco_thread_task = NULL; | 
|  | 215 | return -EINVAL; | 
|  | 216 | } | 
|  | 217 |  | 
|  | 218 | return 0; | 
|  | 219 | } | 
|  | 220 |  | 
|  | 221 | void dlm_complete_recovery_thread(struct dlm_ctxt *dlm) | 
|  | 222 | { | 
|  | 223 | if (dlm->dlm_reco_thread_task) { | 
|  | 224 | mlog(0, "waiting for dlm recovery thread to exit\n"); | 
|  | 225 | kthread_stop(dlm->dlm_reco_thread_task); | 
|  | 226 | dlm->dlm_reco_thread_task = NULL; | 
|  | 227 | } | 
|  | 228 | } | 
|  | 229 |  | 
|  | 230 |  | 
|  | 231 |  | 
|  | 232 | /* | 
|  | 233 | * this is lame, but here's how recovery works... | 
|  | 234 | * 1) all recovery threads cluster wide will work on recovering | 
|  | 235 | *    ONE node at a time | 
|  | 236 | * 2) negotiate who will take over all the locks for the dead node. | 
|  | 237 | *    thats right... ALL the locks. | 
|  | 238 | * 3) once a new master is chosen, everyone scans all locks | 
|  | 239 | *    and moves aside those mastered by the dead guy | 
|  | 240 | * 4) each of these locks should be locked until recovery is done | 
|  | 241 | * 5) the new master collects up all of secondary lock queue info | 
|  | 242 | *    one lock at a time, forcing each node to communicate back | 
|  | 243 | *    before continuing | 
|  | 244 | * 6) each secondary lock queue responds with the full known lock info | 
|  | 245 | * 7) once the new master has run all its locks, it sends a ALLDONE! | 
|  | 246 | *    message to everyone | 
|  | 247 | * 8) upon receiving this message, the secondary queue node unlocks | 
|  | 248 | *    and responds to the ALLDONE | 
|  | 249 | * 9) once the new master gets responses from everyone, he unlocks | 
|  | 250 | *    everything and recovery for this dead node is done | 
|  | 251 | *10) go back to 2) while there are still dead nodes | 
|  | 252 | * | 
|  | 253 | */ | 
|  | 254 |  | 
| Kurt Hackel | d6dea6e | 2006-04-27 18:08:51 -0700 | [diff] [blame] | 255 | static void dlm_print_reco_node_status(struct dlm_ctxt *dlm) | 
|  | 256 | { | 
|  | 257 | struct dlm_reco_node_data *ndata; | 
|  | 258 | struct dlm_lock_resource *res; | 
|  | 259 |  | 
|  | 260 | mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n", | 
| Pavel Emelyanov | ba25f9d | 2007-10-18 23:40:40 -0700 | [diff] [blame] | 261 | dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), | 
| Kurt Hackel | d6dea6e | 2006-04-27 18:08:51 -0700 | [diff] [blame] | 262 | dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive", | 
|  | 263 | dlm->reco.dead_node, dlm->reco.new_master); | 
|  | 264 |  | 
|  | 265 | list_for_each_entry(ndata, &dlm->reco.node_data, list) { | 
|  | 266 | char *st = "unknown"; | 
|  | 267 | switch (ndata->state) { | 
|  | 268 | case DLM_RECO_NODE_DATA_INIT: | 
|  | 269 | st = "init"; | 
|  | 270 | break; | 
|  | 271 | case DLM_RECO_NODE_DATA_REQUESTING: | 
|  | 272 | st = "requesting"; | 
|  | 273 | break; | 
|  | 274 | case DLM_RECO_NODE_DATA_DEAD: | 
|  | 275 | st = "dead"; | 
|  | 276 | break; | 
|  | 277 | case DLM_RECO_NODE_DATA_RECEIVING: | 
|  | 278 | st = "receiving"; | 
|  | 279 | break; | 
|  | 280 | case DLM_RECO_NODE_DATA_REQUESTED: | 
|  | 281 | st = "requested"; | 
|  | 282 | break; | 
|  | 283 | case DLM_RECO_NODE_DATA_DONE: | 
|  | 284 | st = "done"; | 
|  | 285 | break; | 
|  | 286 | case DLM_RECO_NODE_DATA_FINALIZE_SENT: | 
|  | 287 | st = "finalize-sent"; | 
|  | 288 | break; | 
|  | 289 | default: | 
|  | 290 | st = "bad"; | 
|  | 291 | break; | 
|  | 292 | } | 
|  | 293 | mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n", | 
|  | 294 | dlm->name, ndata->node_num, st); | 
|  | 295 | } | 
|  | 296 | list_for_each_entry(res, &dlm->reco.resources, recovering) { | 
|  | 297 | mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n", | 
|  | 298 | dlm->name, res->lockname.len, res->lockname.name); | 
|  | 299 | } | 
|  | 300 | } | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 301 |  | 
|  | 302 | #define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000) | 
|  | 303 |  | 
|  | 304 | static int dlm_recovery_thread(void *data) | 
|  | 305 | { | 
|  | 306 | int status; | 
|  | 307 | struct dlm_ctxt *dlm = data; | 
|  | 308 | unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS); | 
|  | 309 |  | 
|  | 310 | mlog(0, "dlm thread running for %s...\n", dlm->name); | 
|  | 311 |  | 
|  | 312 | while (!kthread_should_stop()) { | 
| Srinivas Eeda | bc9838c | 2010-02-26 12:53:51 -0800 | [diff] [blame] | 313 | if (dlm_domain_fully_joined(dlm)) { | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 314 | status = dlm_do_recovery(dlm); | 
|  | 315 | if (status == -EAGAIN) { | 
|  | 316 | /* do not sleep, recheck immediately. */ | 
|  | 317 | continue; | 
|  | 318 | } | 
|  | 319 | if (status < 0) | 
|  | 320 | mlog_errno(status); | 
|  | 321 | } | 
|  | 322 |  | 
|  | 323 | wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq, | 
|  | 324 | kthread_should_stop(), | 
|  | 325 | timeout); | 
|  | 326 | } | 
|  | 327 |  | 
|  | 328 | mlog(0, "quitting DLM recovery thread\n"); | 
|  | 329 | return 0; | 
|  | 330 | } | 
|  | 331 |  | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 332 | /* returns true when the recovery master has contacted us */ | 
|  | 333 | static int dlm_reco_master_ready(struct dlm_ctxt *dlm) | 
|  | 334 | { | 
|  | 335 | int ready; | 
|  | 336 | spin_lock(&dlm->spinlock); | 
|  | 337 | ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM); | 
|  | 338 | spin_unlock(&dlm->spinlock); | 
|  | 339 | return ready; | 
|  | 340 | } | 
|  | 341 |  | 
|  | 342 | /* returns true if node is no longer in the domain | 
|  | 343 | * could be dead or just not joined */ | 
|  | 344 | int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node) | 
|  | 345 | { | 
|  | 346 | int dead; | 
|  | 347 | spin_lock(&dlm->spinlock); | 
| Kurt Hackel | aba9aac | 2006-04-27 18:00:21 -0700 | [diff] [blame] | 348 | dead = !test_bit(node, dlm->domain_map); | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 349 | spin_unlock(&dlm->spinlock); | 
|  | 350 | return dead; | 
|  | 351 | } | 
|  | 352 |  | 
| Kurt Hackel | b7084ab | 2006-05-01 13:54:07 -0700 | [diff] [blame] | 353 | /* returns true if node is no longer in the domain | 
|  | 354 | * could be dead or just not joined */ | 
| Adrian Bunk | 3fb5a98 | 2006-05-16 17:26:41 +0200 | [diff] [blame] | 355 | static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node) | 
| Kurt Hackel | b7084ab | 2006-05-01 13:54:07 -0700 | [diff] [blame] | 356 | { | 
|  | 357 | int recovered; | 
|  | 358 | spin_lock(&dlm->spinlock); | 
|  | 359 | recovered = !test_bit(node, dlm->recovery_map); | 
|  | 360 | spin_unlock(&dlm->spinlock); | 
|  | 361 | return recovered; | 
|  | 362 | } | 
|  | 363 |  | 
|  | 364 |  | 
| Kurt Hackel | 44465a7 | 2006-01-18 17:05:38 -0800 | [diff] [blame] | 365 | int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout) | 
|  | 366 | { | 
|  | 367 | if (timeout) { | 
|  | 368 | mlog(ML_NOTICE, "%s: waiting %dms for notification of " | 
|  | 369 | "death of node %u\n", dlm->name, timeout, node); | 
|  | 370 | wait_event_timeout(dlm->dlm_reco_thread_wq, | 
|  | 371 | dlm_is_node_dead(dlm, node), | 
|  | 372 | msecs_to_jiffies(timeout)); | 
|  | 373 | } else { | 
|  | 374 | mlog(ML_NOTICE, "%s: waiting indefinitely for notification " | 
|  | 375 | "of death of node %u\n", dlm->name, node); | 
|  | 376 | wait_event(dlm->dlm_reco_thread_wq, | 
|  | 377 | dlm_is_node_dead(dlm, node)); | 
|  | 378 | } | 
|  | 379 | /* for now, return 0 */ | 
|  | 380 | return 0; | 
|  | 381 | } | 
|  | 382 |  | 
| Kurt Hackel | b7084ab | 2006-05-01 13:54:07 -0700 | [diff] [blame] | 383 | int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout) | 
|  | 384 | { | 
|  | 385 | if (timeout) { | 
|  | 386 | mlog(0, "%s: waiting %dms for notification of " | 
|  | 387 | "recovery of node %u\n", dlm->name, timeout, node); | 
|  | 388 | wait_event_timeout(dlm->dlm_reco_thread_wq, | 
|  | 389 | dlm_is_node_recovered(dlm, node), | 
|  | 390 | msecs_to_jiffies(timeout)); | 
|  | 391 | } else { | 
|  | 392 | mlog(0, "%s: waiting indefinitely for notification " | 
|  | 393 | "of recovery of node %u\n", dlm->name, node); | 
|  | 394 | wait_event(dlm->dlm_reco_thread_wq, | 
|  | 395 | dlm_is_node_recovered(dlm, node)); | 
|  | 396 | } | 
|  | 397 | /* for now, return 0 */ | 
|  | 398 | return 0; | 
|  | 399 | } | 
|  | 400 |  | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 401 | /* callers of the top-level api calls (dlmlock/dlmunlock) should | 
|  | 402 | * block on the dlm->reco.event when recovery is in progress. | 
|  | 403 | * the dlm recovery thread will set this state when it begins | 
|  | 404 | * recovering a dead node (as the new master or not) and clear | 
|  | 405 | * the state and wake as soon as all affected lock resources have | 
|  | 406 | * been marked with the RECOVERY flag */ | 
|  | 407 | static int dlm_in_recovery(struct dlm_ctxt *dlm) | 
|  | 408 | { | 
|  | 409 | int in_recovery; | 
|  | 410 | spin_lock(&dlm->spinlock); | 
|  | 411 | in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE); | 
|  | 412 | spin_unlock(&dlm->spinlock); | 
|  | 413 | return in_recovery; | 
|  | 414 | } | 
|  | 415 |  | 
|  | 416 |  | 
|  | 417 | void dlm_wait_for_recovery(struct dlm_ctxt *dlm) | 
|  | 418 | { | 
| Kurt Hackel | 56a7c10 | 2006-05-01 14:30:39 -0700 | [diff] [blame] | 419 | if (dlm_in_recovery(dlm)) { | 
| Kurt Hackel | 3b3b84a | 2006-05-01 14:31:37 -0700 | [diff] [blame] | 420 | mlog(0, "%s: reco thread %d in recovery: " | 
| Kurt Hackel | 56a7c10 | 2006-05-01 14:30:39 -0700 | [diff] [blame] | 421 | "state=%d, master=%u, dead=%u\n", | 
| Pavel Emelyanov | ba25f9d | 2007-10-18 23:40:40 -0700 | [diff] [blame] | 422 | dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), | 
| Kurt Hackel | 56a7c10 | 2006-05-01 14:30:39 -0700 | [diff] [blame] | 423 | dlm->reco.state, dlm->reco.new_master, | 
|  | 424 | dlm->reco.dead_node); | 
|  | 425 | } | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 426 | wait_event(dlm->reco.event, !dlm_in_recovery(dlm)); | 
|  | 427 | } | 
|  | 428 |  | 
|  | 429 | static void dlm_begin_recovery(struct dlm_ctxt *dlm) | 
|  | 430 | { | 
|  | 431 | spin_lock(&dlm->spinlock); | 
|  | 432 | BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE); | 
|  | 433 | dlm->reco.state |= DLM_RECO_STATE_ACTIVE; | 
|  | 434 | spin_unlock(&dlm->spinlock); | 
|  | 435 | } | 
|  | 436 |  | 
|  | 437 | static void dlm_end_recovery(struct dlm_ctxt *dlm) | 
|  | 438 | { | 
|  | 439 | spin_lock(&dlm->spinlock); | 
|  | 440 | BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE)); | 
|  | 441 | dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE; | 
|  | 442 | spin_unlock(&dlm->spinlock); | 
|  | 443 | wake_up(&dlm->reco.event); | 
|  | 444 | } | 
|  | 445 |  | 
|  | 446 | static int dlm_do_recovery(struct dlm_ctxt *dlm) | 
|  | 447 | { | 
|  | 448 | int status = 0; | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 449 | int ret; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 450 |  | 
|  | 451 | spin_lock(&dlm->spinlock); | 
|  | 452 |  | 
|  | 453 | /* check to see if the new master has died */ | 
|  | 454 | if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM && | 
|  | 455 | test_bit(dlm->reco.new_master, dlm->recovery_map)) { | 
|  | 456 | mlog(0, "new master %u died while recovering %u!\n", | 
|  | 457 | dlm->reco.new_master, dlm->reco.dead_node); | 
|  | 458 | /* unset the new_master, leave dead_node */ | 
| Kurt Hackel | ab27eb6 | 2006-04-27 18:03:49 -0700 | [diff] [blame] | 459 | dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 460 | } | 
|  | 461 |  | 
|  | 462 | /* select a target to recover */ | 
|  | 463 | if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { | 
|  | 464 | int bit; | 
|  | 465 |  | 
| Wengang Wang | f471c9d | 2010-06-30 20:23:30 +0800 | [diff] [blame] | 466 | bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES, 0); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 467 | if (bit >= O2NM_MAX_NODES || bit < 0) | 
| Kurt Hackel | ab27eb6 | 2006-04-27 18:03:49 -0700 | [diff] [blame] | 468 | dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 469 | else | 
| Kurt Hackel | ab27eb6 | 2006-04-27 18:03:49 -0700 | [diff] [blame] | 470 | dlm_set_reco_dead_node(dlm, bit); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 471 | } else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) { | 
|  | 472 | /* BUG? */ | 
|  | 473 | mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n", | 
|  | 474 | dlm->reco.dead_node); | 
| Kurt Hackel | ab27eb6 | 2006-04-27 18:03:49 -0700 | [diff] [blame] | 475 | dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 476 | } | 
|  | 477 |  | 
|  | 478 | if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { | 
|  | 479 | // mlog(0, "nothing to recover!  sleeping now!\n"); | 
|  | 480 | spin_unlock(&dlm->spinlock); | 
|  | 481 | /* return to main thread loop and sleep. */ | 
|  | 482 | return 0; | 
|  | 483 | } | 
| Kurt Hackel | d6dea6e | 2006-04-27 18:08:51 -0700 | [diff] [blame] | 484 | mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n", | 
| Pavel Emelyanov | ba25f9d | 2007-10-18 23:40:40 -0700 | [diff] [blame] | 485 | dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 486 | dlm->reco.dead_node); | 
|  | 487 | spin_unlock(&dlm->spinlock); | 
|  | 488 |  | 
|  | 489 | /* take write barrier */ | 
|  | 490 | /* (stops the list reshuffling thread, proxy ast handling) */ | 
|  | 491 | dlm_begin_recovery(dlm); | 
|  | 492 |  | 
|  | 493 | if (dlm->reco.new_master == dlm->node_num) | 
|  | 494 | goto master_here; | 
|  | 495 |  | 
|  | 496 | if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) { | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 497 | /* choose a new master, returns 0 if this node | 
|  | 498 | * is the master, -EEXIST if it's another node. | 
|  | 499 | * this does not return until a new master is chosen | 
|  | 500 | * or recovery completes entirely. */ | 
|  | 501 | ret = dlm_pick_recovery_master(dlm); | 
|  | 502 | if (!ret) { | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 503 | /* already notified everyone.  go. */ | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 504 | goto master_here; | 
|  | 505 | } | 
|  | 506 | mlog(0, "another node will master this recovery session.\n"); | 
|  | 507 | } | 
| Kurt Hackel | d6dea6e | 2006-04-27 18:08:51 -0700 | [diff] [blame] | 508 | mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n", | 
| Pavel Emelyanov | ba25f9d | 2007-10-18 23:40:40 -0700 | [diff] [blame] | 509 | dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), dlm->reco.new_master, | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 510 | dlm->node_num, dlm->reco.dead_node); | 
|  | 511 |  | 
|  | 512 | /* it is safe to start everything back up here | 
|  | 513 | * because all of the dead node's lock resources | 
|  | 514 | * have been marked as in-recovery */ | 
|  | 515 | dlm_end_recovery(dlm); | 
|  | 516 |  | 
|  | 517 | /* sleep out in main dlm_recovery_thread loop. */ | 
|  | 518 | return 0; | 
|  | 519 |  | 
|  | 520 | master_here: | 
| Sunil Mushran | 535f702 | 2008-03-01 14:04:24 -0800 | [diff] [blame] | 521 | mlog(ML_NOTICE, "(%d) Node %u is the Recovery Master for the Dead Node " | 
|  | 522 | "%u for Domain %s\n", task_pid_nr(dlm->dlm_reco_thread_task), | 
|  | 523 | dlm->node_num, dlm->reco.dead_node, dlm->name); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 524 |  | 
|  | 525 | status = dlm_remaster_locks(dlm, dlm->reco.dead_node); | 
|  | 526 | if (status < 0) { | 
| Kurt Hackel | 6a41321 | 2006-05-01 13:49:20 -0700 | [diff] [blame] | 527 | /* we should never hit this anymore */ | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 528 | mlog(ML_ERROR, "error %d remastering locks for node %u, " | 
|  | 529 | "retrying.\n", status, dlm->reco.dead_node); | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 530 | /* yield a bit to allow any final network messages | 
|  | 531 | * to get handled on remaining nodes */ | 
|  | 532 | msleep(100); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 533 | } else { | 
|  | 534 | /* success!  see if any other nodes need recovery */ | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 535 | mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n", | 
|  | 536 | dlm->name, dlm->reco.dead_node, dlm->node_num); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 537 | dlm_reset_recovery(dlm); | 
|  | 538 | } | 
|  | 539 | dlm_end_recovery(dlm); | 
|  | 540 |  | 
|  | 541 | /* continue and look for another dead node */ | 
|  | 542 | return -EAGAIN; | 
|  | 543 | } | 
|  | 544 |  | 
|  | 545 | static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node) | 
|  | 546 | { | 
|  | 547 | int status = 0; | 
|  | 548 | struct dlm_reco_node_data *ndata; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 549 | int all_nodes_done; | 
|  | 550 | int destroy = 0; | 
|  | 551 | int pass = 0; | 
|  | 552 |  | 
| Kurt Hackel | 6a41321 | 2006-05-01 13:49:20 -0700 | [diff] [blame] | 553 | do { | 
|  | 554 | /* we have become recovery master.  there is no escaping | 
|  | 555 | * this, so just keep trying until we get it. */ | 
|  | 556 | status = dlm_init_recovery_area(dlm, dead_node); | 
|  | 557 | if (status < 0) { | 
|  | 558 | mlog(ML_ERROR, "%s: failed to alloc recovery area, " | 
|  | 559 | "retrying\n", dlm->name); | 
|  | 560 | msleep(1000); | 
|  | 561 | } | 
|  | 562 | } while (status != 0); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 563 |  | 
|  | 564 | /* safe to access the node data list without a lock, since this | 
|  | 565 | * process is the only one to change the list */ | 
| Christoph Hellwig | 800deef | 2007-05-17 16:03:13 +0200 | [diff] [blame] | 566 | list_for_each_entry(ndata, &dlm->reco.node_data, list) { | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 567 | BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT); | 
|  | 568 | ndata->state = DLM_RECO_NODE_DATA_REQUESTING; | 
|  | 569 |  | 
|  | 570 | mlog(0, "requesting lock info from node %u\n", | 
|  | 571 | ndata->node_num); | 
|  | 572 |  | 
|  | 573 | if (ndata->node_num == dlm->node_num) { | 
|  | 574 | ndata->state = DLM_RECO_NODE_DATA_DONE; | 
|  | 575 | continue; | 
|  | 576 | } | 
|  | 577 |  | 
| Kurt Hackel | 6a41321 | 2006-05-01 13:49:20 -0700 | [diff] [blame] | 578 | do { | 
|  | 579 | status = dlm_request_all_locks(dlm, ndata->node_num, | 
|  | 580 | dead_node); | 
|  | 581 | if (status < 0) { | 
|  | 582 | mlog_errno(status); | 
|  | 583 | if (dlm_is_host_down(status)) { | 
|  | 584 | /* node died, ignore it for recovery */ | 
|  | 585 | status = 0; | 
|  | 586 | ndata->state = DLM_RECO_NODE_DATA_DEAD; | 
|  | 587 | /* wait for the domain map to catch up | 
|  | 588 | * with the network state. */ | 
|  | 589 | wait_event_timeout(dlm->dlm_reco_thread_wq, | 
|  | 590 | dlm_is_node_dead(dlm, | 
|  | 591 | ndata->node_num), | 
|  | 592 | msecs_to_jiffies(1000)); | 
|  | 593 | mlog(0, "waited 1 sec for %u, " | 
|  | 594 | "dead? %s\n", ndata->node_num, | 
|  | 595 | dlm_is_node_dead(dlm, ndata->node_num) ? | 
|  | 596 | "yes" : "no"); | 
|  | 597 | } else { | 
|  | 598 | /* -ENOMEM on the other node */ | 
|  | 599 | mlog(0, "%s: node %u returned " | 
|  | 600 | "%d during recovery, retrying " | 
|  | 601 | "after a short wait\n", | 
|  | 602 | dlm->name, ndata->node_num, | 
|  | 603 | status); | 
|  | 604 | msleep(100); | 
|  | 605 | } | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 606 | } | 
| Kurt Hackel | 6a41321 | 2006-05-01 13:49:20 -0700 | [diff] [blame] | 607 | } while (status != 0); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 608 |  | 
| Srinivas Eeda | 756a150 | 2007-04-17 13:26:33 -0700 | [diff] [blame] | 609 | spin_lock(&dlm_reco_state_lock); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 610 | switch (ndata->state) { | 
|  | 611 | case DLM_RECO_NODE_DATA_INIT: | 
|  | 612 | case DLM_RECO_NODE_DATA_FINALIZE_SENT: | 
|  | 613 | case DLM_RECO_NODE_DATA_REQUESTED: | 
|  | 614 | BUG(); | 
|  | 615 | break; | 
|  | 616 | case DLM_RECO_NODE_DATA_DEAD: | 
|  | 617 | mlog(0, "node %u died after requesting " | 
|  | 618 | "recovery info for node %u\n", | 
|  | 619 | ndata->node_num, dead_node); | 
| Kurt Hackel | 6a41321 | 2006-05-01 13:49:20 -0700 | [diff] [blame] | 620 | /* fine.  don't need this node's info. | 
|  | 621 | * continue without it. */ | 
|  | 622 | break; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 623 | case DLM_RECO_NODE_DATA_REQUESTING: | 
|  | 624 | ndata->state = DLM_RECO_NODE_DATA_REQUESTED; | 
|  | 625 | mlog(0, "now receiving recovery data from " | 
|  | 626 | "node %u for dead node %u\n", | 
|  | 627 | ndata->node_num, dead_node); | 
|  | 628 | break; | 
|  | 629 | case DLM_RECO_NODE_DATA_RECEIVING: | 
|  | 630 | mlog(0, "already receiving recovery data from " | 
|  | 631 | "node %u for dead node %u\n", | 
|  | 632 | ndata->node_num, dead_node); | 
|  | 633 | break; | 
|  | 634 | case DLM_RECO_NODE_DATA_DONE: | 
|  | 635 | mlog(0, "already DONE receiving recovery data " | 
|  | 636 | "from node %u for dead node %u\n", | 
|  | 637 | ndata->node_num, dead_node); | 
|  | 638 | break; | 
|  | 639 | } | 
| Srinivas Eeda | 756a150 | 2007-04-17 13:26:33 -0700 | [diff] [blame] | 640 | spin_unlock(&dlm_reco_state_lock); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 641 | } | 
|  | 642 |  | 
|  | 643 | mlog(0, "done requesting all lock info\n"); | 
|  | 644 |  | 
|  | 645 | /* nodes should be sending reco data now | 
|  | 646 | * just need to wait */ | 
|  | 647 |  | 
|  | 648 | while (1) { | 
|  | 649 | /* check all the nodes now to see if we are | 
|  | 650 | * done, or if anyone died */ | 
|  | 651 | all_nodes_done = 1; | 
|  | 652 | spin_lock(&dlm_reco_state_lock); | 
| Christoph Hellwig | 800deef | 2007-05-17 16:03:13 +0200 | [diff] [blame] | 653 | list_for_each_entry(ndata, &dlm->reco.node_data, list) { | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 654 | mlog(0, "checking recovery state of node %u\n", | 
|  | 655 | ndata->node_num); | 
|  | 656 | switch (ndata->state) { | 
|  | 657 | case DLM_RECO_NODE_DATA_INIT: | 
|  | 658 | case DLM_RECO_NODE_DATA_REQUESTING: | 
|  | 659 | mlog(ML_ERROR, "bad ndata state for " | 
|  | 660 | "node %u: state=%d\n", | 
|  | 661 | ndata->node_num, ndata->state); | 
|  | 662 | BUG(); | 
|  | 663 | break; | 
|  | 664 | case DLM_RECO_NODE_DATA_DEAD: | 
| Kurt Hackel | 6a41321 | 2006-05-01 13:49:20 -0700 | [diff] [blame] | 665 | mlog(0, "node %u died after " | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 666 | "requesting recovery info for " | 
|  | 667 | "node %u\n", ndata->node_num, | 
|  | 668 | dead_node); | 
| Kurt Hackel | 6a41321 | 2006-05-01 13:49:20 -0700 | [diff] [blame] | 669 | break; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 670 | case DLM_RECO_NODE_DATA_RECEIVING: | 
|  | 671 | case DLM_RECO_NODE_DATA_REQUESTED: | 
| Kurt Hackel | d6dea6e | 2006-04-27 18:08:51 -0700 | [diff] [blame] | 672 | mlog(0, "%s: node %u still in state %s\n", | 
|  | 673 | dlm->name, ndata->node_num, | 
|  | 674 | ndata->state==DLM_RECO_NODE_DATA_RECEIVING ? | 
|  | 675 | "receiving" : "requested"); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 676 | all_nodes_done = 0; | 
|  | 677 | break; | 
|  | 678 | case DLM_RECO_NODE_DATA_DONE: | 
| Kurt Hackel | d6dea6e | 2006-04-27 18:08:51 -0700 | [diff] [blame] | 679 | mlog(0, "%s: node %u state is done\n", | 
|  | 680 | dlm->name, ndata->node_num); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 681 | break; | 
|  | 682 | case DLM_RECO_NODE_DATA_FINALIZE_SENT: | 
| Kurt Hackel | d6dea6e | 2006-04-27 18:08:51 -0700 | [diff] [blame] | 683 | mlog(0, "%s: node %u state is finalize\n", | 
|  | 684 | dlm->name, ndata->node_num); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 685 | break; | 
|  | 686 | } | 
|  | 687 | } | 
|  | 688 | spin_unlock(&dlm_reco_state_lock); | 
|  | 689 |  | 
|  | 690 | mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass, | 
|  | 691 | all_nodes_done?"yes":"no"); | 
|  | 692 | if (all_nodes_done) { | 
|  | 693 | int ret; | 
|  | 694 |  | 
|  | 695 | /* all nodes are now in DLM_RECO_NODE_DATA_DONE state | 
|  | 696 | * just send a finalize message to everyone and | 
|  | 697 | * clean up */ | 
|  | 698 | mlog(0, "all nodes are done! send finalize\n"); | 
|  | 699 | ret = dlm_send_finalize_reco_message(dlm); | 
|  | 700 | if (ret < 0) | 
|  | 701 | mlog_errno(ret); | 
|  | 702 |  | 
|  | 703 | spin_lock(&dlm->spinlock); | 
|  | 704 | dlm_finish_local_lockres_recovery(dlm, dead_node, | 
|  | 705 | dlm->node_num); | 
|  | 706 | spin_unlock(&dlm->spinlock); | 
|  | 707 | mlog(0, "should be done with recovery!\n"); | 
|  | 708 |  | 
|  | 709 | mlog(0, "finishing recovery of %s at %lu, " | 
|  | 710 | "dead=%u, this=%u, new=%u\n", dlm->name, | 
|  | 711 | jiffies, dlm->reco.dead_node, | 
|  | 712 | dlm->node_num, dlm->reco.new_master); | 
|  | 713 | destroy = 1; | 
| Kurt Hackel | 6a41321 | 2006-05-01 13:49:20 -0700 | [diff] [blame] | 714 | status = 0; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 715 | /* rescan everything marked dirty along the way */ | 
|  | 716 | dlm_kick_thread(dlm, NULL); | 
|  | 717 | break; | 
|  | 718 | } | 
|  | 719 | /* wait to be signalled, with periodic timeout | 
|  | 720 | * to check for node death */ | 
|  | 721 | wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq, | 
|  | 722 | kthread_should_stop(), | 
|  | 723 | msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS)); | 
|  | 724 |  | 
|  | 725 | } | 
|  | 726 |  | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 727 | if (destroy) | 
|  | 728 | dlm_destroy_recovery_area(dlm, dead_node); | 
|  | 729 |  | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 730 | return status; | 
|  | 731 | } | 
|  | 732 |  | 
|  | 733 | static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node) | 
|  | 734 | { | 
|  | 735 | int num=0; | 
|  | 736 | struct dlm_reco_node_data *ndata; | 
|  | 737 |  | 
|  | 738 | spin_lock(&dlm->spinlock); | 
|  | 739 | memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map)); | 
|  | 740 | /* nodes can only be removed (by dying) after dropping | 
|  | 741 | * this lock, and death will be trapped later, so this should do */ | 
|  | 742 | spin_unlock(&dlm->spinlock); | 
|  | 743 |  | 
|  | 744 | while (1) { | 
|  | 745 | num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num); | 
|  | 746 | if (num >= O2NM_MAX_NODES) { | 
|  | 747 | break; | 
|  | 748 | } | 
|  | 749 | BUG_ON(num == dead_node); | 
|  | 750 |  | 
| Robert P. J. Day | cd86128 | 2006-12-13 00:34:52 -0800 | [diff] [blame] | 751 | ndata = kzalloc(sizeof(*ndata), GFP_NOFS); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 752 | if (!ndata) { | 
|  | 753 | dlm_destroy_recovery_area(dlm, dead_node); | 
|  | 754 | return -ENOMEM; | 
|  | 755 | } | 
|  | 756 | ndata->node_num = num; | 
|  | 757 | ndata->state = DLM_RECO_NODE_DATA_INIT; | 
|  | 758 | spin_lock(&dlm_reco_state_lock); | 
|  | 759 | list_add_tail(&ndata->list, &dlm->reco.node_data); | 
|  | 760 | spin_unlock(&dlm_reco_state_lock); | 
|  | 761 | num++; | 
|  | 762 | } | 
|  | 763 |  | 
|  | 764 | return 0; | 
|  | 765 | } | 
|  | 766 |  | 
|  | 767 | static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node) | 
|  | 768 | { | 
| Christoph Hellwig | 800deef | 2007-05-17 16:03:13 +0200 | [diff] [blame] | 769 | struct dlm_reco_node_data *ndata, *next; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 770 | LIST_HEAD(tmplist); | 
|  | 771 |  | 
|  | 772 | spin_lock(&dlm_reco_state_lock); | 
|  | 773 | list_splice_init(&dlm->reco.node_data, &tmplist); | 
|  | 774 | spin_unlock(&dlm_reco_state_lock); | 
|  | 775 |  | 
| Christoph Hellwig | 800deef | 2007-05-17 16:03:13 +0200 | [diff] [blame] | 776 | list_for_each_entry_safe(ndata, next, &tmplist, list) { | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 777 | list_del_init(&ndata->list); | 
|  | 778 | kfree(ndata); | 
|  | 779 | } | 
|  | 780 | } | 
|  | 781 |  | 
|  | 782 | static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from, | 
|  | 783 | u8 dead_node) | 
|  | 784 | { | 
|  | 785 | struct dlm_lock_request lr; | 
|  | 786 | enum dlm_status ret; | 
|  | 787 |  | 
|  | 788 | mlog(0, "\n"); | 
|  | 789 |  | 
|  | 790 |  | 
|  | 791 | mlog(0, "dlm_request_all_locks: dead node is %u, sending request " | 
|  | 792 | "to %u\n", dead_node, request_from); | 
|  | 793 |  | 
|  | 794 | memset(&lr, 0, sizeof(lr)); | 
|  | 795 | lr.node_idx = dlm->node_num; | 
|  | 796 | lr.dead_node = dead_node; | 
|  | 797 |  | 
|  | 798 | // send message | 
|  | 799 | ret = DLM_NOLOCKMGR; | 
|  | 800 | ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key, | 
|  | 801 | &lr, sizeof(lr), request_from, NULL); | 
|  | 802 |  | 
|  | 803 | /* negative status is handled by caller */ | 
|  | 804 | if (ret < 0) | 
| Wengang Wang | a5196ec | 2010-03-30 12:09:22 +0800 | [diff] [blame] | 805 | mlog(ML_ERROR, "Error %d when sending message %u (key " | 
|  | 806 | "0x%x) to node %u\n", ret, DLM_LOCK_REQUEST_MSG, | 
|  | 807 | dlm->key, request_from); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 808 |  | 
|  | 809 | // return from here, then | 
|  | 810 | // sleep until all received or error | 
|  | 811 | return ret; | 
|  | 812 |  | 
|  | 813 | } | 
|  | 814 |  | 
| Kurt Hackel | d74c980 | 2007-01-17 17:04:25 -0800 | [diff] [blame] | 815 | int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data, | 
|  | 816 | void **ret_data) | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 817 | { | 
|  | 818 | struct dlm_ctxt *dlm = data; | 
|  | 819 | struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf; | 
|  | 820 | char *buf = NULL; | 
|  | 821 | struct dlm_work_item *item = NULL; | 
|  | 822 |  | 
|  | 823 | if (!dlm_grab(dlm)) | 
|  | 824 | return -EINVAL; | 
|  | 825 |  | 
| Kurt Hackel | c3187ce | 2006-04-27 18:05:41 -0700 | [diff] [blame] | 826 | if (lr->dead_node != dlm->reco.dead_node) { | 
|  | 827 | mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local " | 
|  | 828 | "dead_node is %u\n", dlm->name, lr->node_idx, | 
|  | 829 | lr->dead_node, dlm->reco.dead_node); | 
| Kurt Hackel | d6dea6e | 2006-04-27 18:08:51 -0700 | [diff] [blame] | 830 | dlm_print_reco_node_status(dlm); | 
| Kurt Hackel | c3187ce | 2006-04-27 18:05:41 -0700 | [diff] [blame] | 831 | /* this is a hack */ | 
|  | 832 | dlm_put(dlm); | 
|  | 833 | return -ENOMEM; | 
|  | 834 | } | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 835 | BUG_ON(lr->dead_node != dlm->reco.dead_node); | 
|  | 836 |  | 
| Robert P. J. Day | cd86128 | 2006-12-13 00:34:52 -0800 | [diff] [blame] | 837 | item = kzalloc(sizeof(*item), GFP_NOFS); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 838 | if (!item) { | 
|  | 839 | dlm_put(dlm); | 
|  | 840 | return -ENOMEM; | 
|  | 841 | } | 
|  | 842 |  | 
|  | 843 | /* this will get freed by dlm_request_all_locks_worker */ | 
| Kurt Hackel | ad8100e | 2006-05-01 14:25:21 -0700 | [diff] [blame] | 844 | buf = (char *) __get_free_page(GFP_NOFS); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 845 | if (!buf) { | 
|  | 846 | kfree(item); | 
|  | 847 | dlm_put(dlm); | 
|  | 848 | return -ENOMEM; | 
|  | 849 | } | 
|  | 850 |  | 
|  | 851 | /* queue up work for dlm_request_all_locks_worker */ | 
|  | 852 | dlm_grab(dlm);  /* get an extra ref for the work item */ | 
|  | 853 | dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf); | 
|  | 854 | item->u.ral.reco_master = lr->node_idx; | 
|  | 855 | item->u.ral.dead_node = lr->dead_node; | 
|  | 856 | spin_lock(&dlm->work_lock); | 
|  | 857 | list_add_tail(&item->list, &dlm->work_list); | 
|  | 858 | spin_unlock(&dlm->work_lock); | 
| Kurt Hackel | 3156d26 | 2006-05-01 14:39:29 -0700 | [diff] [blame] | 859 | queue_work(dlm->dlm_worker, &dlm->dispatched_work); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 860 |  | 
|  | 861 | dlm_put(dlm); | 
|  | 862 | return 0; | 
|  | 863 | } | 
|  | 864 |  | 
|  | 865 | static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data) | 
|  | 866 | { | 
|  | 867 | struct dlm_migratable_lockres *mres; | 
|  | 868 | struct dlm_lock_resource *res; | 
|  | 869 | struct dlm_ctxt *dlm; | 
|  | 870 | LIST_HEAD(resources); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 871 | int ret; | 
|  | 872 | u8 dead_node, reco_master; | 
| Kurt Hackel | 29c0fa0 | 2006-04-27 18:06:58 -0700 | [diff] [blame] | 873 | int skip_all_done = 0; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 874 |  | 
|  | 875 | dlm = item->dlm; | 
|  | 876 | dead_node = item->u.ral.dead_node; | 
|  | 877 | reco_master = item->u.ral.reco_master; | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 878 | mres = (struct dlm_migratable_lockres *)data; | 
|  | 879 |  | 
| Kurt Hackel | d6dea6e | 2006-04-27 18:08:51 -0700 | [diff] [blame] | 880 | mlog(0, "%s: recovery worker started, dead=%u, master=%u\n", | 
|  | 881 | dlm->name, dead_node, reco_master); | 
|  | 882 |  | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 883 | if (dead_node != dlm->reco.dead_node || | 
|  | 884 | reco_master != dlm->reco.new_master) { | 
| Kurt Hackel | 6a41321 | 2006-05-01 13:49:20 -0700 | [diff] [blame] | 885 | /* worker could have been created before the recovery master | 
|  | 886 | * died.  if so, do not continue, but do not error. */ | 
|  | 887 | if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) { | 
|  | 888 | mlog(ML_NOTICE, "%s: will not send recovery state, " | 
|  | 889 | "recovery master %u died, thread=(dead=%u,mas=%u)" | 
|  | 890 | " current=(dead=%u,mas=%u)\n", dlm->name, | 
|  | 891 | reco_master, dead_node, reco_master, | 
|  | 892 | dlm->reco.dead_node, dlm->reco.new_master); | 
|  | 893 | } else { | 
|  | 894 | mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, " | 
|  | 895 | "master=%u), request(dead=%u, master=%u)\n", | 
|  | 896 | dlm->name, dlm->reco.dead_node, | 
|  | 897 | dlm->reco.new_master, dead_node, reco_master); | 
|  | 898 | } | 
|  | 899 | goto leave; | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 900 | } | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 901 |  | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 902 | /* lock resources should have already been moved to the | 
|  | 903 | * dlm->reco.resources list.  now move items from that list | 
|  | 904 | * to a temp list if the dead owner matches.  note that the | 
|  | 905 | * whole cluster recovers only one node at a time, so we | 
|  | 906 | * can safely move UNKNOWN lock resources for each recovery | 
|  | 907 | * session. */ | 
|  | 908 | dlm_move_reco_locks_to_list(dlm, &resources, dead_node); | 
|  | 909 |  | 
|  | 910 | /* now we can begin blasting lockreses without the dlm lock */ | 
| Kurt Hackel | 29c0fa0 | 2006-04-27 18:06:58 -0700 | [diff] [blame] | 911 |  | 
|  | 912 | /* any errors returned will be due to the new_master dying, | 
|  | 913 | * the dlm_reco_thread should detect this */ | 
| Christoph Hellwig | 800deef | 2007-05-17 16:03:13 +0200 | [diff] [blame] | 914 | list_for_each_entry(res, &resources, recovering) { | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 915 | ret = dlm_send_one_lockres(dlm, res, mres, reco_master, | 
|  | 916 | DLM_MRES_RECOVERY); | 
| Kurt Hackel | 29c0fa0 | 2006-04-27 18:06:58 -0700 | [diff] [blame] | 917 | if (ret < 0) { | 
| Kurt Hackel | d6dea6e | 2006-04-27 18:08:51 -0700 | [diff] [blame] | 918 | mlog(ML_ERROR, "%s: node %u went down while sending " | 
|  | 919 | "recovery state for dead node %u, ret=%d\n", dlm->name, | 
|  | 920 | reco_master, dead_node, ret); | 
| Kurt Hackel | 29c0fa0 | 2006-04-27 18:06:58 -0700 | [diff] [blame] | 921 | skip_all_done = 1; | 
|  | 922 | break; | 
|  | 923 | } | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 924 | } | 
|  | 925 |  | 
|  | 926 | /* move the resources back to the list */ | 
|  | 927 | spin_lock(&dlm->spinlock); | 
|  | 928 | list_splice_init(&resources, &dlm->reco.resources); | 
|  | 929 | spin_unlock(&dlm->spinlock); | 
|  | 930 |  | 
| Kurt Hackel | 29c0fa0 | 2006-04-27 18:06:58 -0700 | [diff] [blame] | 931 | if (!skip_all_done) { | 
|  | 932 | ret = dlm_send_all_done_msg(dlm, dead_node, reco_master); | 
|  | 933 | if (ret < 0) { | 
| Kurt Hackel | d6dea6e | 2006-04-27 18:08:51 -0700 | [diff] [blame] | 934 | mlog(ML_ERROR, "%s: node %u went down while sending " | 
|  | 935 | "recovery all-done for dead node %u, ret=%d\n", | 
|  | 936 | dlm->name, reco_master, dead_node, ret); | 
| Kurt Hackel | 29c0fa0 | 2006-04-27 18:06:58 -0700 | [diff] [blame] | 937 | } | 
|  | 938 | } | 
| Kurt Hackel | 6a41321 | 2006-05-01 13:49:20 -0700 | [diff] [blame] | 939 | leave: | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 940 | free_page((unsigned long)data); | 
|  | 941 | } | 
|  | 942 |  | 
|  | 943 |  | 
|  | 944 | static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to) | 
|  | 945 | { | 
|  | 946 | int ret, tmpret; | 
|  | 947 | struct dlm_reco_data_done done_msg; | 
|  | 948 |  | 
|  | 949 | memset(&done_msg, 0, sizeof(done_msg)); | 
|  | 950 | done_msg.node_idx = dlm->node_num; | 
|  | 951 | done_msg.dead_node = dead_node; | 
|  | 952 | mlog(0, "sending DATA DONE message to %u, " | 
|  | 953 | "my node=%u, dead node=%u\n", send_to, done_msg.node_idx, | 
|  | 954 | done_msg.dead_node); | 
|  | 955 |  | 
|  | 956 | ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg, | 
|  | 957 | sizeof(done_msg), send_to, &tmpret); | 
| Kurt Hackel | 29c0fa0 | 2006-04-27 18:06:58 -0700 | [diff] [blame] | 958 | if (ret < 0) { | 
| Wengang Wang | a5196ec | 2010-03-30 12:09:22 +0800 | [diff] [blame] | 959 | mlog(ML_ERROR, "Error %d when sending message %u (key " | 
|  | 960 | "0x%x) to node %u\n", ret, DLM_RECO_DATA_DONE_MSG, | 
|  | 961 | dlm->key, send_to); | 
| Kurt Hackel | 29c0fa0 | 2006-04-27 18:06:58 -0700 | [diff] [blame] | 962 | if (!dlm_is_host_down(ret)) { | 
| Kurt Hackel | 29c0fa0 | 2006-04-27 18:06:58 -0700 | [diff] [blame] | 963 | BUG(); | 
|  | 964 | } | 
|  | 965 | } else | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 966 | ret = tmpret; | 
|  | 967 | return ret; | 
|  | 968 | } | 
|  | 969 |  | 
|  | 970 |  | 
| Kurt Hackel | d74c980 | 2007-01-17 17:04:25 -0800 | [diff] [blame] | 971 | int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data, | 
|  | 972 | void **ret_data) | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 973 | { | 
|  | 974 | struct dlm_ctxt *dlm = data; | 
|  | 975 | struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 976 | struct dlm_reco_node_data *ndata = NULL; | 
|  | 977 | int ret = -EINVAL; | 
|  | 978 |  | 
|  | 979 | if (!dlm_grab(dlm)) | 
|  | 980 | return -EINVAL; | 
|  | 981 |  | 
|  | 982 | mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, " | 
|  | 983 | "node_idx=%u, this node=%u\n", done->dead_node, | 
|  | 984 | dlm->reco.dead_node, done->node_idx, dlm->node_num); | 
| Kurt Hackel | d6dea6e | 2006-04-27 18:08:51 -0700 | [diff] [blame] | 985 |  | 
|  | 986 | mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node), | 
|  | 987 | "Got DATA DONE: dead_node=%u, reco.dead_node=%u, " | 
|  | 988 | "node_idx=%u, this node=%u\n", done->dead_node, | 
|  | 989 | dlm->reco.dead_node, done->node_idx, dlm->node_num); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 990 |  | 
|  | 991 | spin_lock(&dlm_reco_state_lock); | 
| Christoph Hellwig | 800deef | 2007-05-17 16:03:13 +0200 | [diff] [blame] | 992 | list_for_each_entry(ndata, &dlm->reco.node_data, list) { | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 993 | if (ndata->node_num != done->node_idx) | 
|  | 994 | continue; | 
|  | 995 |  | 
|  | 996 | switch (ndata->state) { | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 997 | /* should have moved beyond INIT but not to FINALIZE yet */ | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 998 | case DLM_RECO_NODE_DATA_INIT: | 
|  | 999 | case DLM_RECO_NODE_DATA_DEAD: | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1000 | case DLM_RECO_NODE_DATA_FINALIZE_SENT: | 
|  | 1001 | mlog(ML_ERROR, "bad ndata state for node %u:" | 
|  | 1002 | " state=%d\n", ndata->node_num, | 
|  | 1003 | ndata->state); | 
|  | 1004 | BUG(); | 
|  | 1005 | break; | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 1006 | /* these states are possible at this point, anywhere along | 
|  | 1007 | * the line of recovery */ | 
|  | 1008 | case DLM_RECO_NODE_DATA_DONE: | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1009 | case DLM_RECO_NODE_DATA_RECEIVING: | 
|  | 1010 | case DLM_RECO_NODE_DATA_REQUESTED: | 
|  | 1011 | case DLM_RECO_NODE_DATA_REQUESTING: | 
|  | 1012 | mlog(0, "node %u is DONE sending " | 
|  | 1013 | "recovery data!\n", | 
|  | 1014 | ndata->node_num); | 
|  | 1015 |  | 
|  | 1016 | ndata->state = DLM_RECO_NODE_DATA_DONE; | 
|  | 1017 | ret = 0; | 
|  | 1018 | break; | 
|  | 1019 | } | 
|  | 1020 | } | 
|  | 1021 | spin_unlock(&dlm_reco_state_lock); | 
|  | 1022 |  | 
|  | 1023 | /* wake the recovery thread, some node is done */ | 
|  | 1024 | if (!ret) | 
|  | 1025 | dlm_kick_recovery_thread(dlm); | 
|  | 1026 |  | 
|  | 1027 | if (ret < 0) | 
|  | 1028 | mlog(ML_ERROR, "failed to find recovery node data for node " | 
|  | 1029 | "%u\n", done->node_idx); | 
|  | 1030 | dlm_put(dlm); | 
|  | 1031 |  | 
|  | 1032 | mlog(0, "leaving reco data done handler, ret=%d\n", ret); | 
|  | 1033 | return ret; | 
|  | 1034 | } | 
|  | 1035 |  | 
|  | 1036 | static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm, | 
|  | 1037 | struct list_head *list, | 
|  | 1038 | u8 dead_node) | 
|  | 1039 | { | 
| Christoph Hellwig | 800deef | 2007-05-17 16:03:13 +0200 | [diff] [blame] | 1040 | struct dlm_lock_resource *res, *next; | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 1041 | struct dlm_lock *lock; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1042 |  | 
|  | 1043 | spin_lock(&dlm->spinlock); | 
| Christoph Hellwig | 800deef | 2007-05-17 16:03:13 +0200 | [diff] [blame] | 1044 | list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) { | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 1045 | /* always prune any $RECOVERY entries for dead nodes, | 
|  | 1046 | * otherwise hangs can occur during later recovery */ | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1047 | if (dlm_is_recovery_lock(res->lockname.name, | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 1048 | res->lockname.len)) { | 
|  | 1049 | spin_lock(&res->spinlock); | 
|  | 1050 | list_for_each_entry(lock, &res->granted, list) { | 
|  | 1051 | if (lock->ml.node == dead_node) { | 
|  | 1052 | mlog(0, "AHA! there was " | 
|  | 1053 | "a $RECOVERY lock for dead " | 
| Sunil Mushran | 2bd6321 | 2010-01-25 16:57:38 -0800 | [diff] [blame] | 1054 | "node %u (%s)!\n", | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 1055 | dead_node, dlm->name); | 
|  | 1056 | list_del_init(&lock->list); | 
|  | 1057 | dlm_lock_put(lock); | 
|  | 1058 | break; | 
|  | 1059 | } | 
|  | 1060 | } | 
|  | 1061 | spin_unlock(&res->spinlock); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1062 | continue; | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 1063 | } | 
|  | 1064 |  | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1065 | if (res->owner == dead_node) { | 
|  | 1066 | mlog(0, "found lockres owned by dead node while " | 
|  | 1067 | "doing recovery for node %u. sending it.\n", | 
|  | 1068 | dead_node); | 
| Akinobu Mita | f116629 | 2006-06-26 00:24:46 -0700 | [diff] [blame] | 1069 | list_move_tail(&res->recovering, list); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1070 | } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { | 
|  | 1071 | mlog(0, "found UNKNOWN owner while doing recovery " | 
|  | 1072 | "for node %u. sending it.\n", dead_node); | 
| Akinobu Mita | f116629 | 2006-06-26 00:24:46 -0700 | [diff] [blame] | 1073 | list_move_tail(&res->recovering, list); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1074 | } | 
|  | 1075 | } | 
|  | 1076 | spin_unlock(&dlm->spinlock); | 
|  | 1077 | } | 
|  | 1078 |  | 
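|  |  | /* Count every lock attached to this lockres.  This walks the granted, | 
|  |  | * converting and blocked queues by advancing the list_head pointer, so | 
|  |  | * it relies on those three queues being adjacent fields in | 
|  |  | * struct dlm_lock_resource. */ | 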
|  | 1079 | static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res) | 
|  | 1080 | { | 
|  | 1081 | int total_locks = 0; | 
|  | 1082 | struct list_head *iter, *queue = &res->granted; | 
|  | 1083 | int i; | 
|  | 1084 |  | 
|  | 1085 | for (i=0; i<3; i++) { | 
|  | 1086 | list_for_each(iter, queue) | 
|  | 1087 | total_locks++; | 
|  | 1088 | queue++; | 
|  | 1089 | } | 
|  | 1090 | return total_locks; | 
|  | 1091 | } | 
|  | 1092 |  | 
|  | 1093 |  | 
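|  |  | /* Flush the locks packed so far in mres to the target node.  total_locks | 
|  |  | * is the caller's running count; once it reaches the precomputed | 
|  |  | * mres_total_locks, DLM_MRES_ALL_DONE is added so the receiver knows | 
|  |  | * this is the final message for the lockres.  On return the page is | 
|  |  | * reinitialized so the caller can keep packing locks. */ | 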
|  | 1094 | static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm, | 
|  | 1095 | struct dlm_migratable_lockres *mres, | 
|  | 1096 | u8 send_to, | 
|  | 1097 | struct dlm_lock_resource *res, | 
|  | 1098 | int total_locks) | 
|  | 1099 | { | 
|  | 1100 | u64 mig_cookie = be64_to_cpu(mres->mig_cookie); | 
|  | 1101 | int mres_total_locks = be32_to_cpu(mres->total_locks); | 
|  | 1102 | int sz, ret = 0, status = 0; | 
|  | 1103 | u8 orig_flags = mres->flags, | 
|  | 1104 | orig_master = mres->master; | 
|  | 1105 |  | 
|  | 1106 | BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS); | 
|  | 1107 | if (!mres->num_locks) | 
|  | 1108 | return 0; | 
|  | 1109 |  | 
|  | 1110 | sz = sizeof(struct dlm_migratable_lockres) + | 
|  | 1111 | (mres->num_locks * sizeof(struct dlm_migratable_lock)); | 
|  | 1112 |  | 
|  | 1113 | /* add an all-done flag if we reached the last lock */ | 
|  | 1114 | orig_flags = mres->flags; | 
|  | 1115 | BUG_ON(total_locks > mres_total_locks); | 
|  | 1116 | if (total_locks == mres_total_locks) | 
|  | 1117 | mres->flags |= DLM_MRES_ALL_DONE; | 
|  | 1118 |  | 
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 1119 | mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n", | 
|  | 1120 | dlm->name, res->lockname.len, res->lockname.name, | 
| Jeff Liu | 17ae26b | 2009-07-07 15:51:40 +0800 | [diff] [blame] | 1121 | orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery", | 
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 1122 | send_to); | 
|  | 1123 |  | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1124 | /* send it */ | 
|  | 1125 | ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres, | 
|  | 1126 | sz, send_to, &status); | 
|  | 1127 | if (ret < 0) { | 
|  | 1128 | /* XXX: negative status is not handled. | 
|  | 1129 | * this will end up killing this node. */ | 
| Wengang Wang | a5196ec | 2010-03-30 12:09:22 +0800 | [diff] [blame] | 1130 | mlog(ML_ERROR, "Error %d when sending message %u (key " | 
|  | 1131 | "0x%x) to node %u\n", ret, DLM_MIG_LOCKRES_MSG, | 
|  | 1132 | dlm->key, send_to); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1133 | } else { | 
|  | 1134 | /* might get an -ENOMEM back here */ | 
|  | 1135 | ret = status; | 
|  | 1136 | if (ret < 0) { | 
|  | 1137 | mlog_errno(ret); | 
|  | 1138 |  | 
|  | 1139 | if (ret == -EFAULT) { | 
|  | 1140 | mlog(ML_ERROR, "node %u told me to kill " | 
|  | 1141 | "myself!\n", send_to); | 
|  | 1142 | BUG(); | 
|  | 1143 | } | 
|  | 1144 | } | 
|  | 1145 | } | 
|  | 1146 |  | 
|  | 1147 | /* zero and reinit the message buffer */ | 
|  | 1148 | dlm_init_migratable_lockres(mres, res->lockname.name, | 
|  | 1149 | res->lockname.len, mres_total_locks, | 
|  | 1150 | mig_cookie, orig_flags, orig_master); | 
|  | 1151 | return ret; | 
|  | 1152 | } | 
|  | 1153 |  | 
|  | 1154 | static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres, | 
|  | 1155 | const char *lockname, int namelen, | 
|  | 1156 | int total_locks, u64 cookie, | 
|  | 1157 | u8 flags, u8 master) | 
|  | 1158 | { | 
|  | 1159 | /* mres here is one full page */ | 
| Shani Moideen | 5fb0f7f | 2007-06-11 09:38:19 +0530 | [diff] [blame] | 1160 | clear_page(mres); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1161 | mres->lockname_len = namelen; | 
|  | 1162 | memcpy(mres->lockname, lockname, namelen); | 
|  | 1163 | mres->num_locks = 0; | 
|  | 1164 | mres->total_locks = cpu_to_be32(total_locks); | 
|  | 1165 | mres->mig_cookie = cpu_to_be64(cookie); | 
|  | 1166 | mres->flags = flags; | 
|  | 1167 | mres->master = master; | 
|  | 1168 | } | 
|  | 1169 |  | 
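|  |  | /* Copy a usable lvb into the outgoing message.  Only granted EX/PR locks | 
|  |  | * carry a trustworthy lvb; the first one found is copied into mres->lvb, | 
|  |  | * and any later valid lock whose lvb disagrees is treated as corruption | 
|  |  | * and BUGs. */ | 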
| Sunil Mushran | 71656fa | 2010-01-25 16:57:39 -0800 | [diff] [blame] | 1170 | static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock, | 
|  | 1171 | struct dlm_migratable_lockres *mres, | 
|  | 1172 | int queue) | 
|  | 1173 | { | 
|  | 1174 | if (!lock->lksb) | 
|  | 1175 | return; | 
|  | 1176 |  | 
|  | 1177 | /* Ignore lvb in all locks in the blocked list */ | 
|  | 1178 | if (queue == DLM_BLOCKED_LIST) | 
|  | 1179 | return; | 
|  | 1180 |  | 
|  | 1181 | /* Only consider lvbs in locks with granted EX or PR lock levels */ | 
|  | 1182 | if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE) | 
|  | 1183 | return; | 
|  | 1184 |  | 
|  | 1185 | if (dlm_lvb_is_empty(mres->lvb)) { | 
|  | 1186 | memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN); | 
|  | 1187 | return; | 
|  | 1188 | } | 
|  | 1189 |  | 
|  | 1190 | /* Ensure the lvb copied for migration matches in other valid locks */ | 
|  | 1191 | if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN)) | 
|  | 1192 | return; | 
|  | 1193 |  | 
|  | 1194 | mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, " | 
|  | 1195 | "node=%u\n", | 
|  | 1196 | dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), | 
|  | 1197 | dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), | 
|  | 1198 | lock->lockres->lockname.len, lock->lockres->lockname.name, | 
|  | 1199 | lock->ml.node); | 
|  | 1200 | dlm_print_one_lock_resource(lock->lockres); | 
|  | 1201 | BUG(); | 
|  | 1202 | } | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1203 |  | 
|  | 1204 | /* returns 1 if this lock fills the network structure, | 
|  | 1205 | * 0 otherwise */ | 
|  | 1206 | static int dlm_add_lock_to_array(struct dlm_lock *lock, | 
|  | 1207 | struct dlm_migratable_lockres *mres, int queue) | 
|  | 1208 | { | 
|  | 1209 | struct dlm_migratable_lock *ml; | 
|  | 1210 | int lock_num = mres->num_locks; | 
|  | 1211 |  | 
|  | 1212 | ml = &(mres->ml[lock_num]); | 
|  | 1213 | ml->cookie = lock->ml.cookie; | 
|  | 1214 | ml->type = lock->ml.type; | 
|  | 1215 | ml->convert_type = lock->ml.convert_type; | 
|  | 1216 | ml->highest_blocked = lock->ml.highest_blocked; | 
|  | 1217 | ml->list = queue; | 
|  | 1218 | if (lock->lksb) { | 
|  | 1219 | ml->flags = lock->lksb->flags; | 
| Sunil Mushran | 71656fa | 2010-01-25 16:57:39 -0800 | [diff] [blame] | 1220 | dlm_prepare_lvb_for_migration(lock, mres, queue); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1221 | } | 
|  | 1222 | ml->node = lock->ml.node; | 
|  | 1223 | mres->num_locks++; | 
|  | 1224 | /* we reached the max, send this network message */ | 
|  | 1225 | if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS) | 
|  | 1226 | return 1; | 
|  | 1227 | return 0; | 
|  | 1228 | } | 
|  | 1229 |  | 
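|  |  | /* A "dummy" lock is a placeholder sent when a lockres has no locks at | 
|  |  | * all: a zero-cookie, all-IVMODE entry on the blocked list.  It exists | 
|  |  | * only so the receiving node sets a refmap bit for the sender (see the | 
|  |  | * dlm_is_dummy_lock() check in dlm_process_recovery_data()). */ | 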
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 1230 | static void dlm_add_dummy_lock(struct dlm_ctxt *dlm, | 
|  | 1231 | struct dlm_migratable_lockres *mres) | 
|  | 1232 | { | 
|  | 1233 | struct dlm_lock dummy; | 
|  | 1234 | memset(&dummy, 0, sizeof(dummy)); | 
|  | 1235 | dummy.ml.cookie = 0; | 
|  | 1236 | dummy.ml.type = LKM_IVMODE; | 
|  | 1237 | dummy.ml.convert_type = LKM_IVMODE; | 
|  | 1238 | dummy.ml.highest_blocked = LKM_IVMODE; | 
|  | 1239 | dummy.lksb = NULL; | 
|  | 1240 | dummy.ml.node = dlm->node_num; | 
|  | 1241 | dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST); | 
|  | 1242 | } | 
|  | 1243 |  | 
|  | 1244 | static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm, | 
|  | 1245 | struct dlm_migratable_lock *ml, | 
|  | 1246 | u8 *nodenum) | 
|  | 1247 | { | 
|  | 1248 | if (unlikely(ml->cookie == 0 && | 
|  | 1249 | ml->type == LKM_IVMODE && | 
|  | 1250 | ml->convert_type == LKM_IVMODE && | 
|  | 1251 | ml->highest_blocked == LKM_IVMODE && | 
|  | 1252 | ml->list == DLM_BLOCKED_LIST)) { | 
|  | 1253 | *nodenum = ml->node; | 
|  | 1254 | return 1; | 
|  | 1255 | } | 
|  | 1256 | return 0; | 
|  | 1257 | } | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1258 |  | 
|  | 1259 | int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, | 
|  | 1260 | struct dlm_migratable_lockres *mres, | 
|  | 1261 | u8 send_to, u8 flags) | 
|  | 1262 | { | 
| Christoph Hellwig | 800deef | 2007-05-17 16:03:13 +0200 | [diff] [blame] | 1263 | struct list_head *queue; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1264 | int total_locks, i; | 
|  | 1265 | u64 mig_cookie = 0; | 
|  | 1266 | struct dlm_lock *lock; | 
|  | 1267 | int ret = 0; | 
|  | 1268 |  | 
|  | 1269 | BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION))); | 
|  | 1270 |  | 
|  | 1271 | mlog(0, "sending to %u\n", send_to); | 
|  | 1272 |  | 
|  | 1273 | total_locks = dlm_num_locks_in_lockres(res); | 
|  | 1274 | if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) { | 
|  | 1275 | /* rare, but possible */ | 
|  | 1276 | mlog(0, "argh.  lockres has %d locks.  this will " | 
|  | 1277 | "require more than one network packet to " | 
|  | 1278 | "migrate\n", total_locks); | 
|  | 1279 | mig_cookie = dlm_get_next_mig_cookie(); | 
|  | 1280 | } | 
|  | 1281 |  | 
|  | 1282 | dlm_init_migratable_lockres(mres, res->lockname.name, | 
|  | 1283 | res->lockname.len, total_locks, | 
|  | 1284 | mig_cookie, flags, res->owner); | 
|  | 1285 |  | 
|  | 1286 | total_locks = 0; | 
|  | 1287 | for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) { | 
|  | 1288 | queue = dlm_list_idx_to_ptr(res, i); | 
| Christoph Hellwig | 800deef | 2007-05-17 16:03:13 +0200 | [diff] [blame] | 1289 | list_for_each_entry(lock, queue, list) { | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1290 | /* add another lock. */ | 
|  | 1291 | total_locks++; | 
|  | 1292 | if (!dlm_add_lock_to_array(lock, mres, i)) | 
|  | 1293 | continue; | 
|  | 1294 |  | 
|  | 1295 | /* this filled the lock message, | 
|  | 1296 | * we must send it immediately. */ | 
|  | 1297 | ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, | 
|  | 1298 | res, total_locks); | 
| Kurt Hackel | 29c0fa0 | 2006-04-27 18:06:58 -0700 | [diff] [blame] | 1299 | if (ret < 0) | 
|  | 1300 | goto error; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1301 | } | 
|  | 1302 | } | 
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 1303 | if (total_locks == 0) { | 
|  | 1304 | /* send a dummy lock to indicate a mastery reference only */ | 
|  | 1305 | mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n", | 
|  | 1306 | dlm->name, res->lockname.len, res->lockname.name, | 
|  | 1307 | send_to, flags & DLM_MRES_RECOVERY ? "recovery" : | 
|  | 1308 | "migration"); | 
|  | 1309 | dlm_add_dummy_lock(dlm, mres); | 
|  | 1310 | } | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1311 | /* flush any remaining locks */ | 
|  | 1312 | ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks); | 
| Kurt Hackel | 29c0fa0 | 2006-04-27 18:06:58 -0700 | [diff] [blame] | 1313 | if (ret < 0) | 
|  | 1314 | goto error; | 
|  | 1315 | return ret; | 
|  | 1316 |  | 
|  | 1317 | error: | 
|  | 1318 | mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n", | 
|  | 1319 | dlm->name, ret); | 
|  | 1320 | if (!dlm_is_host_down(ret)) | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1321 | BUG(); | 
| Kurt Hackel | 29c0fa0 | 2006-04-27 18:06:58 -0700 | [diff] [blame] | 1322 | mlog(0, "%s: node %u went down while sending %s " | 
|  | 1323 | "lockres %.*s\n", dlm->name, send_to, | 
|  | 1324 | flags & DLM_MRES_RECOVERY ? "recovery" : "migration", | 
|  | 1325 | res->lockname.len, res->lockname.name); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1326 | return ret; | 
|  | 1327 | } | 
|  | 1328 |  | 
|  | 1329 |  | 
|  | 1330 |  | 
|  | 1331 | /* | 
|  | 1332 | * this message will contain no more than one page worth of | 
|  | 1333 | * recovery data, and it will work on only one lockres. | 
|  | 1334 | * there may be many locks in this page, and we may need to wait | 
|  | 1335 | * for additional packets to complete all the locks (rare, but | 
|  | 1336 | * possible). | 
|  | 1337 | */ | 
|  | 1338 | /* | 
|  | 1339 | * NOTE: the allocation error cases here are scary | 
|  | 1340 | * we really cannot afford to fail an alloc in recovery | 
|  | 1341 | * do we spin?  returning an error only delays the problem really | 
|  | 1342 | */ | 
|  | 1343 |  | 
| Kurt Hackel | d74c980 | 2007-01-17 17:04:25 -0800 | [diff] [blame] | 1344 | int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data, | 
|  | 1345 | void **ret_data) | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1346 | { | 
|  | 1347 | struct dlm_ctxt *dlm = data; | 
|  | 1348 | struct dlm_migratable_lockres *mres = | 
|  | 1349 | (struct dlm_migratable_lockres *)msg->buf; | 
|  | 1350 | int ret = 0; | 
|  | 1351 | u8 real_master; | 
| Sunil Mushran | 52987e2 | 2008-03-01 14:04:21 -0800 | [diff] [blame] | 1352 | u8 extra_refs = 0; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1353 | char *buf = NULL; | 
|  | 1354 | struct dlm_work_item *item = NULL; | 
|  | 1355 | struct dlm_lock_resource *res = NULL; | 
|  | 1356 |  | 
|  | 1357 | if (!dlm_grab(dlm)) | 
|  | 1358 | return -EINVAL; | 
|  | 1359 |  | 
|  | 1360 | BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION))); | 
|  | 1361 |  | 
|  | 1362 | real_master = mres->master; | 
|  | 1363 | if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { | 
|  | 1364 | /* cannot migrate a lockres with no master */ | 
|  | 1365 | BUG_ON(!(mres->flags & DLM_MRES_RECOVERY)); | 
|  | 1366 | } | 
|  | 1367 |  | 
|  | 1368 | mlog(0, "%s message received from node %u\n", | 
|  | 1369 | (mres->flags & DLM_MRES_RECOVERY) ? | 
|  | 1370 | "recovery" : "migration", mres->master); | 
|  | 1371 | if (mres->flags & DLM_MRES_ALL_DONE) | 
|  | 1372 | mlog(0, "all done flag.  all lockres data received!\n"); | 
|  | 1373 |  | 
|  | 1374 | ret = -ENOMEM; | 
| Kurt Hackel | ad8100e | 2006-05-01 14:25:21 -0700 | [diff] [blame] | 1375 | buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS); | 
| Robert P. J. Day | cd86128 | 2006-12-13 00:34:52 -0800 | [diff] [blame] | 1376 | item = kzalloc(sizeof(*item), GFP_NOFS); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1377 | if (!buf || !item) | 
|  | 1378 | goto leave; | 
|  | 1379 |  | 
|  | 1380 | /* look up the lockres to see if we have a secondary queue for this | 
|  | 1381 | * already...  just add the locks in and this will have its owner | 
|  | 1382 | * and RECOVERY flag changed when it completes. */ | 
|  | 1383 | res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len); | 
|  | 1384 | if (res) { | 
|  | 1385 | /* this will get a ref on res */ | 
|  | 1386 | /* mark it as recovering/migrating and hash it */ | 
|  | 1387 | spin_lock(&res->spinlock); | 
|  | 1388 | if (mres->flags & DLM_MRES_RECOVERY) { | 
|  | 1389 | res->state |= DLM_LOCK_RES_RECOVERING; | 
|  | 1390 | } else { | 
|  | 1391 | if (res->state & DLM_LOCK_RES_MIGRATING) { | 
|  | 1392 | /* this is at least the second | 
|  | 1393 | * lockres message */ | 
|  | 1394 | mlog(0, "lock %.*s is already migrating\n", | 
|  | 1395 | mres->lockname_len, | 
|  | 1396 | mres->lockname); | 
|  | 1397 | } else if (res->state & DLM_LOCK_RES_RECOVERING) { | 
|  | 1398 | /* caller should BUG */ | 
|  | 1399 | mlog(ML_ERROR, "node is attempting to migrate " | 
|  | 1400 | "lock %.*s, but marked as recovering!\n", | 
|  | 1401 | mres->lockname_len, mres->lockname); | 
|  | 1402 | ret = -EFAULT; | 
|  | 1403 | spin_unlock(&res->spinlock); | 
|  | 1404 | goto leave; | 
|  | 1405 | } | 
|  | 1406 | res->state |= DLM_LOCK_RES_MIGRATING; | 
|  | 1407 | } | 
|  | 1408 | spin_unlock(&res->spinlock); | 
|  | 1409 | } else { | 
|  | 1410 | /* need to allocate, just like if it was | 
|  | 1411 | * mastered here normally  */ | 
|  | 1412 | res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len); | 
|  | 1413 | if (!res) | 
|  | 1414 | goto leave; | 
|  | 1415 |  | 
|  | 1416 | /* to match the ref that we would have gotten if | 
|  | 1417 | * dlm_lookup_lockres had succeeded */ | 
|  | 1418 | dlm_lockres_get(res); | 
|  | 1419 |  | 
|  | 1420 | /* mark it as recovering/migrating and hash it */ | 
|  | 1421 | if (mres->flags & DLM_MRES_RECOVERY) | 
|  | 1422 | res->state |= DLM_LOCK_RES_RECOVERING; | 
|  | 1423 | else | 
|  | 1424 | res->state |= DLM_LOCK_RES_MIGRATING; | 
|  | 1425 |  | 
|  | 1426 | spin_lock(&dlm->spinlock); | 
|  | 1427 | __dlm_insert_lockres(dlm, res); | 
|  | 1428 | spin_unlock(&dlm->spinlock); | 
|  | 1429 |  | 
| Sunil Mushran | 52987e2 | 2008-03-01 14:04:21 -0800 | [diff] [blame] | 1430 | /* Add an extra ref for this lock-less lockres lest the | 
|  | 1431 | * dlm_thread purges it before we get the chance to add | 
|  | 1432 | * locks to it */ | 
|  | 1433 | dlm_lockres_get(res); | 
|  | 1434 |  | 
|  | 1435 | /* There are three refs that need to be put. | 
|  | 1436 | * 1. Taken above. | 
|  | 1437 | * 2. kref_init in dlm_new_lockres()->dlm_init_lockres(). | 
|  | 1438 | * 3. dlm_lookup_lockres() | 
|  | 1439 | * The first one is handled at the end of this function. The | 
|  | 1440 | * other two are handled in the worker thread after locks have | 
|  | 1441 | * been attached. Yes, we don't wait for purge time to match | 
|  | 1442 | * kref_init. The lockres will still have at least one ref | 
|  | 1443 | * added because it is in the hash __dlm_insert_lockres() */ | 
|  | 1444 | extra_refs++; | 
|  | 1445 |  | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1446 | /* now that the new lockres is inserted, | 
|  | 1447 | * make it usable by other processes */ | 
|  | 1448 | spin_lock(&res->spinlock); | 
|  | 1449 | res->state &= ~DLM_LOCK_RES_IN_PROGRESS; | 
|  | 1450 | spin_unlock(&res->spinlock); | 
| Kurt Hackel | a6fa364 | 2007-01-17 14:59:12 -0800 | [diff] [blame] | 1451 | wake_up(&res->wq); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1452 | } | 
|  | 1453 |  | 
|  | 1454 | /* at this point we have allocated everything we need, | 
|  | 1455 | * and we have a hashed lockres with an extra ref and | 
|  | 1456 | * the proper res->state flags. */ | 
|  | 1457 | ret = 0; | 
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 1458 | spin_lock(&res->spinlock); | 
|  | 1459 | /* drop this either when master requery finds a different master | 
|  | 1460 | * or when a lock is added by the recovery worker */ | 
|  | 1461 | dlm_lockres_grab_inflight_ref(dlm, res); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1462 | if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) { | 
|  | 1463 | /* migration cannot have an unknown master */ | 
|  | 1464 | BUG_ON(!(mres->flags & DLM_MRES_RECOVERY)); | 
|  | 1465 | mlog(0, "recovery has passed me a lockres with an " | 
|  | 1466 | "unknown owner.. will need to requery: " | 
|  | 1467 | "%.*s\n", mres->lockname_len, mres->lockname); | 
|  | 1468 | } else { | 
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 1469 | /* take a reference now to pin the lockres, drop it | 
|  | 1470 | * when locks are added in the worker */ | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1471 | dlm_change_lockres_owner(dlm, res, dlm->node_num); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1472 | } | 
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 1473 | spin_unlock(&res->spinlock); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1474 |  | 
|  | 1475 | /* queue up work for dlm_mig_lockres_worker */ | 
|  | 1476 | dlm_grab(dlm);  /* get an extra ref for the work item */ | 
|  | 1477 | memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */ | 
|  | 1478 | dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf); | 
|  | 1479 | item->u.ml.lockres = res; /* already have a ref */ | 
|  | 1480 | item->u.ml.real_master = real_master; | 
| Sunil Mushran | 52987e2 | 2008-03-01 14:04:21 -0800 | [diff] [blame] | 1481 | item->u.ml.extra_ref = extra_refs; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1482 | spin_lock(&dlm->work_lock); | 
|  | 1483 | list_add_tail(&item->list, &dlm->work_list); | 
|  | 1484 | spin_unlock(&dlm->work_lock); | 
| Kurt Hackel | 3156d26 | 2006-05-01 14:39:29 -0700 | [diff] [blame] | 1485 | queue_work(dlm->dlm_worker, &dlm->dispatched_work); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1486 |  | 
|  | 1487 | leave: | 
| Sunil Mushran | 52987e2 | 2008-03-01 14:04:21 -0800 | [diff] [blame] | 1488 | /* One extra ref taken needs to be put here */ | 
|  | 1489 | if (extra_refs) | 
|  | 1490 | dlm_lockres_put(res); | 
|  | 1491 |  | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1492 | dlm_put(dlm); | 
|  | 1493 | if (ret < 0) { | 
|  | 1494 | if (buf) | 
|  | 1495 | kfree(buf); | 
|  | 1496 | if (item) | 
|  | 1497 | kfree(item); | 
| Tao Ma | c1e8d35 | 2011-03-07 16:43:21 +0800 | [diff] [blame] | 1498 | mlog_errno(ret); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1499 | } | 
|  | 1500 |  | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1501 | return ret; | 
|  | 1502 | } | 
|  | 1503 |  | 
|  | 1504 |  | 
|  | 1505 | static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data) | 
|  | 1506 | { | 
| Sunil Mushran | 52987e2 | 2008-03-01 14:04:21 -0800 | [diff] [blame] | 1507 | struct dlm_ctxt *dlm; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1508 | struct dlm_migratable_lockres *mres; | 
|  | 1509 | int ret = 0; | 
|  | 1510 | struct dlm_lock_resource *res; | 
|  | 1511 | u8 real_master; | 
| Sunil Mushran | 52987e2 | 2008-03-01 14:04:21 -0800 | [diff] [blame] | 1512 | u8 extra_ref; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1513 |  | 
|  | 1514 | dlm = item->dlm; | 
|  | 1515 | mres = (struct dlm_migratable_lockres *)data; | 
|  | 1516 |  | 
|  | 1517 | res = item->u.ml.lockres; | 
|  | 1518 | real_master = item->u.ml.real_master; | 
| Sunil Mushran | 52987e2 | 2008-03-01 14:04:21 -0800 | [diff] [blame] | 1519 | extra_ref = item->u.ml.extra_ref; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1520 |  | 
|  | 1521 | if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { | 
|  | 1522 | /* this case is super-rare. only occurs if | 
|  | 1523 | * node death happens during migration. */ | 
|  | 1524 | again: | 
|  | 1525 | ret = dlm_lockres_master_requery(dlm, res, &real_master); | 
|  | 1526 | if (ret < 0) { | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 1527 | mlog(0, "dlm_lockres_master_requery ret=%d\n", | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1528 | ret); | 
|  | 1529 | goto again; | 
|  | 1530 | } | 
|  | 1531 | if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { | 
|  | 1532 | mlog(0, "lockres %.*s not claimed.  " | 
|  | 1533 | "this node will take it.\n", | 
|  | 1534 | res->lockname.len, res->lockname.name); | 
|  | 1535 | } else { | 
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 1536 | spin_lock(&res->spinlock); | 
|  | 1537 | dlm_lockres_drop_inflight_ref(dlm, res); | 
|  | 1538 | spin_unlock(&res->spinlock); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1539 | mlog(0, "master needs to respond to sender " | 
|  | 1540 | "that node %u still owns %.*s\n", | 
|  | 1541 | real_master, res->lockname.len, | 
|  | 1542 | res->lockname.name); | 
|  | 1543 | /* cannot touch this lockres */ | 
|  | 1544 | goto leave; | 
|  | 1545 | } | 
|  | 1546 | } | 
|  | 1547 |  | 
|  | 1548 | ret = dlm_process_recovery_data(dlm, res, mres); | 
|  | 1549 | if (ret < 0) | 
|  | 1550 | mlog(0, "dlm_process_recovery_data returned %d\n", ret); | 
|  | 1551 | else | 
|  | 1552 | mlog(0, "dlm_process_recovery_data succeeded\n"); | 
|  | 1553 |  | 
|  | 1554 | if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) == | 
|  | 1555 | (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) { | 
|  | 1556 | ret = dlm_finish_migration(dlm, res, mres->master); | 
|  | 1557 | if (ret < 0) | 
|  | 1558 | mlog_errno(ret); | 
|  | 1559 | } | 
|  | 1560 |  | 
|  | 1561 | leave: | 
| Sunil Mushran | 52987e2 | 2008-03-01 14:04:21 -0800 | [diff] [blame] | 1562 | /* See comment in dlm_mig_lockres_handler() */ | 
|  | 1563 | if (res) { | 
|  | 1564 | if (extra_ref) | 
|  | 1565 | dlm_lockres_put(res); | 
|  | 1566 | dlm_lockres_put(res); | 
|  | 1567 | } | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1568 | kfree(data); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1569 | } | 
|  | 1570 |  | 
|  | 1571 |  | 
|  | 1572 |  | 
| Adrian Bunk | 8169cae | 2006-03-31 16:53:55 +0200 | [diff] [blame] | 1573 | static int dlm_lockres_master_requery(struct dlm_ctxt *dlm, | 
|  | 1574 | struct dlm_lock_resource *res, | 
|  | 1575 | u8 *real_master) | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1576 | { | 
|  | 1577 | struct dlm_node_iter iter; | 
|  | 1578 | int nodenum; | 
|  | 1579 | int ret = 0; | 
|  | 1580 |  | 
|  | 1581 | *real_master = DLM_LOCK_RES_OWNER_UNKNOWN; | 
|  | 1582 |  | 
|  | 1583 | /* we only reach here if one of the two nodes in a | 
|  | 1584 | * migration died while the migration was in progress. | 
|  | 1585 | * at this point we need to requery the master.  we | 
|  | 1586 | * know that the new_master got as far as creating | 
|  | 1587 | * an mle on at least one node, but we do not know | 
|  | 1588 | * if any nodes had actually cleared the mle and set | 
|  | 1589 | * the master to the new_master.  the old master | 
|  | 1590 | * is supposed to set the owner to UNKNOWN in the | 
|  | 1591 | * event of a new_master death, so the only possible | 
|  | 1592 | * responses that we can get from nodes here are | 
|  | 1593 | * that the master is new_master, or that the master | 
|  | 1594 | * is UNKNOWN. | 
|  | 1595 | * if all nodes come back with UNKNOWN then we know | 
|  | 1596 | * the lock needs remastering here. | 
|  | 1597 | * if any node comes back with a valid master, check | 
|  | 1598 | * to see if that master is the one that we are | 
|  | 1599 | * recovering.  if so, then the new_master died and | 
|  | 1600 | * we need to remaster this lock.  if not, then the | 
|  | 1601 | * new_master survived and that node will respond to | 
|  | 1602 | * other nodes about the owner. | 
|  | 1603 | * if there is an owner, this node needs to dump this | 
|  | 1604 | * lockres and alert the sender that this lockres | 
|  | 1605 | * was rejected. */ | 
|  | 1606 | spin_lock(&dlm->spinlock); | 
|  | 1607 | dlm_node_iter_init(dlm->domain_map, &iter); | 
|  | 1608 | spin_unlock(&dlm->spinlock); | 
|  | 1609 |  | 
|  | 1610 | while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { | 
|  | 1611 | /* do not send to self */ | 
|  | 1612 | if (nodenum == dlm->node_num) | 
|  | 1613 | continue; | 
|  | 1614 | ret = dlm_do_master_requery(dlm, res, nodenum, real_master); | 
|  | 1615 | if (ret < 0) { | 
|  | 1616 | mlog_errno(ret); | 
| Kurt Hackel | c03872f | 2006-03-06 14:08:49 -0800 | [diff] [blame] | 1617 | if (!dlm_is_host_down(ret)) | 
|  | 1618 | BUG(); | 
|  | 1619 | /* host is down, so answer for that node would be | 
|  | 1620 | * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */ | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1621 | } | 
|  | 1622 | if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) { | 
|  | 1623 | mlog(0, "lock master is %u\n", *real_master); | 
|  | 1624 | break; | 
|  | 1625 | } | 
|  | 1626 | } | 
|  | 1627 | return ret; | 
|  | 1628 | } | 
|  | 1629 |  | 
|  | 1630 |  | 
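|  |  | /* Ask a single node who it believes owns this lockres.  The owner is | 
|  |  | * carried back in the message status; DLM_LOCK_RES_OWNER_UNKNOWN means | 
|  |  | * that node has no opinion. */ | 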
| Kurt Hackel | c03872f | 2006-03-06 14:08:49 -0800 | [diff] [blame] | 1631 | int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, | 
|  | 1632 | u8 nodenum, u8 *real_master) | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1633 | { | 
|  | 1634 | int ret = -EINVAL; | 
|  | 1635 | struct dlm_master_requery req; | 
|  | 1636 | int status = DLM_LOCK_RES_OWNER_UNKNOWN; | 
|  | 1637 |  | 
|  | 1638 | memset(&req, 0, sizeof(req)); | 
|  | 1639 | req.node_idx = dlm->node_num; | 
|  | 1640 | req.namelen = res->lockname.len; | 
|  | 1641 | memcpy(req.name, res->lockname.name, res->lockname.len); | 
|  | 1642 |  | 
|  | 1643 | ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key, | 
|  | 1644 | &req, sizeof(req), nodenum, &status); | 
|  | 1645 | /* XXX: negative status not handled properly here. */ | 
|  | 1646 | if (ret < 0) | 
| Wengang Wang | a5196ec | 2010-03-30 12:09:22 +0800 | [diff] [blame] | 1647 | mlog(ML_ERROR, "Error %d when sending message %u (key " | 
|  | 1648 | "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG, | 
|  | 1649 | dlm->key, nodenum); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1650 | else { | 
|  | 1651 | BUG_ON(status < 0); | 
|  | 1652 | BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN); | 
|  | 1653 | *real_master = (u8) (status & 0xff); | 
|  | 1654 | mlog(0, "node %u responded to master requery with %u\n", | 
|  | 1655 | nodenum, *real_master); | 
|  | 1656 | ret = 0; | 
|  | 1657 | } | 
|  | 1658 | return ret; | 
|  | 1659 | } | 
|  | 1660 |  | 
|  | 1661 |  | 
|  | 1662 | /* this handler never returns an error, so unless the sending | 
|  | 1663 | * or receiving of the message failed, the owner it reports can | 
|  | 1664 | * be trusted */ | 
| Kurt Hackel | d74c980 | 2007-01-17 17:04:25 -0800 | [diff] [blame] | 1665 | int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data, | 
|  | 1666 | void **ret_data) | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1667 | { | 
|  | 1668 | struct dlm_ctxt *dlm = data; | 
|  | 1669 | struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf; | 
|  | 1670 | struct dlm_lock_resource *res = NULL; | 
| Mark Fasheh | a3d3329 | 2006-03-09 17:55:56 -0800 | [diff] [blame] | 1671 | unsigned int hash; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1672 | int master = DLM_LOCK_RES_OWNER_UNKNOWN; | 
|  | 1673 | u32 flags = DLM_ASSERT_MASTER_REQUERY; | 
|  | 1674 |  | 
|  | 1675 | if (!dlm_grab(dlm)) { | 
|  | 1676 | /* since the domain has gone away on this | 
|  | 1677 | * node, the proper response is UNKNOWN */ | 
|  | 1678 | return master; | 
|  | 1679 | } | 
|  | 1680 |  | 
| Mark Fasheh | a3d3329 | 2006-03-09 17:55:56 -0800 | [diff] [blame] | 1681 | hash = dlm_lockid_hash(req->name, req->namelen); | 
|  | 1682 |  | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1683 | spin_lock(&dlm->spinlock); | 
| Mark Fasheh | a3d3329 | 2006-03-09 17:55:56 -0800 | [diff] [blame] | 1684 | res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1685 | if (res) { | 
|  | 1686 | spin_lock(&res->spinlock); | 
|  | 1687 | master = res->owner; | 
|  | 1688 | if (master == dlm->node_num) { | 
|  | 1689 | int ret = dlm_dispatch_assert_master(dlm, res, | 
|  | 1690 | 0, 0, flags); | 
|  | 1691 | if (ret < 0) { | 
|  | 1692 | mlog_errno(-ENOMEM); | 
|  | 1693 | /* retry!? */ | 
|  | 1694 | BUG(); | 
|  | 1695 | } | 
| Sunil Mushran | 52987e2 | 2008-03-01 14:04:21 -0800 | [diff] [blame] | 1696 | } else /* put.. in case we are not the master */ | 
|  | 1697 | dlm_lockres_put(res); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1698 | spin_unlock(&res->spinlock); | 
|  | 1699 | } | 
|  | 1700 | spin_unlock(&dlm->spinlock); | 
|  | 1701 |  | 
|  | 1702 | dlm_put(dlm); | 
|  | 1703 | return master; | 
|  | 1704 | } | 
|  | 1705 |  | 
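|  |  | /* Map a wire list index (0=granted, 1=converting, 2=blocked) back to the | 
|  |  | * matching queue.  Like dlm_num_locks_in_lockres(), this depends on the | 
|  |  | * three list_heads being laid out consecutively in the lockres. */ | 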
|  | 1706 | static inline struct list_head * | 
|  | 1707 | dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num) | 
|  | 1708 | { | 
|  | 1709 | struct list_head *ret; | 
|  | 1710 | BUG_ON(list_num < 0); | 
|  | 1711 | BUG_ON(list_num > 2); | 
|  | 1712 | ret = &(res->granted); | 
|  | 1713 | ret += list_num; | 
|  | 1714 | return ret; | 
|  | 1715 | } | 
|  | 1716 | /* TODO: do ast flush business | 
|  | 1717 | * TODO: do MIGRATING and RECOVERING spinning | 
|  | 1718 | */ | 
|  | 1719 |  | 
|  | 1720 | /* | 
|  | 1721 | * NOTE about in-flight requests during migration: | 
|  | 1722 | * | 
|  | 1723 | * Before attempting the migrate, the master has marked the lockres as | 
|  | 1724 | * MIGRATING and then flushed all of its pending ASTS.  So any in-flight | 
|  | 1725 | * requests either got queued before the MIGRATING flag got set, in which | 
|  | 1726 | * case the lock data will reflect the change and a return message is on | 
|  | 1727 | * the way, or the request failed to get in before MIGRATING got set.  In | 
|  | 1728 | * this case, the caller will be told to spin and wait for the MIGRATING | 
|  | 1729 | * flag to be dropped, then recheck the master. | 
|  | 1730 | * This holds true for the convert, cancel and unlock cases, and since lvb | 
|  | 1731 | * updates are tied to these same messages, it applies to lvb updates as | 
|  | 1732 | * well.  For the lock case, there is no way a lock can be on the master | 
|  | 1733 | * queue and not be on the secondary queue since the lock is always added | 
|  | 1734 | * locally first.  This means that the new target node will never be sent | 
|  | 1735 | * a lock that he doesn't already have on the list. | 
|  | 1736 | * In total, this means that the local lock is correct and should not be | 
|  | 1737 | * updated to match the one sent by the master.  Any messages sent back | 
|  | 1738 | * from the master before the MIGRATING flag will bring the lock properly | 
|  | 1739 | * up-to-date, and the change will be ordered properly for the waiter. | 
|  | 1740 | * We will *not* attempt to modify the lock underneath the waiter. | 
|  | 1741 | */ | 
|  | 1742 |  | 
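|  |  | /* Attach the locks described in mres to the local lockres.  A dummy | 
|  |  | * entry only sets the sender's refmap bit.  Locks belonging to this node | 
|  |  | * (migration only) must already exist and are simply moved to the queue | 
|  |  | * the master reported; locks for other nodes get a new dlm_lock, have | 
|  |  | * their lvb validated, and are added to that queue unless a lock with | 
|  |  | * the same cookie is already present. */ | 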
|  | 1743 | static int dlm_process_recovery_data(struct dlm_ctxt *dlm, | 
|  | 1744 | struct dlm_lock_resource *res, | 
|  | 1745 | struct dlm_migratable_lockres *mres) | 
|  | 1746 | { | 
|  | 1747 | struct dlm_migratable_lock *ml; | 
|  | 1748 | struct list_head *queue; | 
| Kurt Hackel | e17e75e | 2007-01-05 15:04:49 -0800 | [diff] [blame] | 1749 | struct list_head *tmpq = NULL; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1750 | struct dlm_lock *newlock = NULL; | 
|  | 1751 | struct dlm_lockstatus *lksb = NULL; | 
|  | 1752 | int ret = 0; | 
| Kurt Hackel | e17e75e | 2007-01-05 15:04:49 -0800 | [diff] [blame] | 1753 | int i, j, bad; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1754 | struct dlm_lock *lock = NULL; | 
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 1755 | u8 from = O2NM_MAX_NODES; | 
|  | 1756 | unsigned int added = 0; | 
| Sunil Mushran | 26636bf | 2010-01-25 16:57:40 -0800 | [diff] [blame] | 1757 | __be64 c; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1758 |  | 
|  | 1759 | mlog(0, "running %d locks for this lockres\n", mres->num_locks); | 
|  | 1760 | for (i=0; i<mres->num_locks; i++) { | 
|  | 1761 | ml = &(mres->ml[i]); | 
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 1762 |  | 
|  | 1763 | if (dlm_is_dummy_lock(dlm, ml, &from)) { | 
|  | 1764 | /* placeholder, just need to set the refmap bit */ | 
|  | 1765 | BUG_ON(mres->num_locks != 1); | 
|  | 1766 | mlog(0, "%s:%.*s: dummy lock for %u\n", | 
|  | 1767 | dlm->name, mres->lockname_len, mres->lockname, | 
|  | 1768 | from); | 
|  | 1769 | spin_lock(&res->spinlock); | 
|  | 1770 | dlm_lockres_set_refmap_bit(from, res); | 
|  | 1771 | spin_unlock(&res->spinlock); | 
|  | 1772 | added++; | 
|  | 1773 | break; | 
|  | 1774 | } | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1775 | BUG_ON(ml->highest_blocked != LKM_IVMODE); | 
|  | 1776 | newlock = NULL; | 
|  | 1777 | lksb = NULL; | 
|  | 1778 |  | 
|  | 1779 | queue = dlm_list_num_to_pointer(res, ml->list); | 
| Kurt Hackel | e17e75e | 2007-01-05 15:04:49 -0800 | [diff] [blame] | 1780 | tmpq = NULL; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1781 |  | 
|  | 1782 | /* if the lock is for the local node it needs to | 
|  | 1783 | * be moved to the proper location within the queue. | 
|  | 1784 | * do not allocate a new lock structure. */ | 
|  | 1785 | if (ml->node == dlm->node_num) { | 
|  | 1786 | /* MIGRATION ONLY! */ | 
|  | 1787 | BUG_ON(!(mres->flags & DLM_MRES_MIGRATION)); | 
|  | 1788 |  | 
|  | 1789 | spin_lock(&res->spinlock); | 
| Kurt Hackel | e17e75e | 2007-01-05 15:04:49 -0800 | [diff] [blame] | 1790 | for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) { | 
|  | 1791 | tmpq = dlm_list_idx_to_ptr(res, j); | 
| Christoph Hellwig | 800deef | 2007-05-17 16:03:13 +0200 | [diff] [blame] | 1792 | list_for_each_entry(lock, tmpq, list) { | 
| Kurt Hackel | e17e75e | 2007-01-05 15:04:49 -0800 | [diff] [blame] | 1793 | if (lock->ml.cookie != ml->cookie) | 
|  | 1794 | lock = NULL; | 
|  | 1795 | else | 
|  | 1796 | break; | 
|  | 1797 | } | 
|  | 1798 | if (lock) | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1799 | break; | 
|  | 1800 | } | 
|  | 1801 |  | 
|  | 1802 | /* lock is always created locally first, and | 
|  | 1803 | * destroyed locally last.  it must be on the list */ | 
|  | 1804 | if (!lock) { | 
| Sunil Mushran | 26636bf | 2010-01-25 16:57:40 -0800 | [diff] [blame] | 1805 | c = ml->cookie; | 
|  | 1806 | mlog(ML_ERROR, "Could not find local lock " | 
|  | 1807 | "with cookie %u:%llu, node %u, " | 
|  | 1808 | "list %u, flags 0x%x, type %d, " | 
|  | 1809 | "conv %d, highest blocked %d\n", | 
| Kurt Hackel | 74aa258 | 2007-01-17 15:11:36 -0800 | [diff] [blame] | 1810 | dlm_get_lock_cookie_node(be64_to_cpu(c)), | 
| Sunil Mushran | 26636bf | 2010-01-25 16:57:40 -0800 | [diff] [blame] | 1811 | dlm_get_lock_cookie_seq(be64_to_cpu(c)), | 
|  | 1812 | ml->node, ml->list, ml->flags, ml->type, | 
|  | 1813 | ml->convert_type, ml->highest_blocked); | 
| Kurt Hackel | 71ac106 | 2007-01-05 15:02:30 -0800 | [diff] [blame] | 1814 | __dlm_print_one_lock_resource(res); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1815 | BUG(); | 
|  | 1816 | } | 
| Sunil Mushran | 26636bf | 2010-01-25 16:57:40 -0800 | [diff] [blame] | 1817 |  | 
|  | 1818 | if (lock->ml.node != ml->node) { | 
|  | 1819 | c = lock->ml.cookie; | 
|  | 1820 | mlog(ML_ERROR, "Mismatched node# in lock " | 
|  | 1821 | "cookie %u:%llu, name %.*s, node %u\n", | 
|  | 1822 | dlm_get_lock_cookie_node(be64_to_cpu(c)), | 
|  | 1823 | dlm_get_lock_cookie_seq(be64_to_cpu(c)), | 
|  | 1824 | res->lockname.len, res->lockname.name, | 
|  | 1825 | lock->ml.node); | 
|  | 1826 | c = ml->cookie; | 
|  | 1827 | mlog(ML_ERROR, "Migrate lock cookie %u:%llu, " | 
|  | 1828 | "node %u, list %u, flags 0x%x, type %d, " | 
|  | 1829 | "conv %d, highest blocked %d\n", | 
|  | 1830 | dlm_get_lock_cookie_node(be64_to_cpu(c)), | 
|  | 1831 | dlm_get_lock_cookie_seq(be64_to_cpu(c)), | 
|  | 1832 | ml->node, ml->list, ml->flags, ml->type, | 
|  | 1833 | ml->convert_type, ml->highest_blocked); | 
|  | 1834 | __dlm_print_one_lock_resource(res); | 
|  | 1835 | BUG(); | 
|  | 1836 | } | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1837 |  | 
| Kurt Hackel | e17e75e | 2007-01-05 15:04:49 -0800 | [diff] [blame] | 1838 | if (tmpq != queue) { | 
| Sunil Mushran | 26636bf | 2010-01-25 16:57:40 -0800 | [diff] [blame] | 1839 | c = ml->cookie; | 
|  | 1840 | mlog(0, "Lock cookie %u:%llu was on list %u " | 
|  | 1841 | "instead of list %u for %.*s\n", | 
|  | 1842 | dlm_get_lock_cookie_node(be64_to_cpu(c)), | 
|  | 1843 | dlm_get_lock_cookie_seq(be64_to_cpu(c)), | 
|  | 1844 | j, ml->list, res->lockname.len, | 
|  | 1845 | res->lockname.name); | 
|  | 1846 | __dlm_print_one_lock_resource(res); | 
| Kurt Hackel | e17e75e | 2007-01-05 15:04:49 -0800 | [diff] [blame] | 1847 | spin_unlock(&res->spinlock); | 
|  | 1848 | continue; | 
|  | 1849 | } | 
|  | 1850 |  | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1851 | /* see NOTE above about why we do not update | 
|  | 1852 | * to match the master here */ | 
|  | 1853 |  | 
|  | 1854 | /* move the lock to its proper place */ | 
|  | 1855 | /* do not alter lock refcount.  switching lists. */ | 
| Akinobu Mita | f116629 | 2006-06-26 00:24:46 -0700 | [diff] [blame] | 1856 | list_move_tail(&lock->list, queue); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1857 | spin_unlock(&res->spinlock); | 
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 1858 | added++; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1859 |  | 
|  | 1860 | mlog(0, "just reordered a local lock!\n"); | 
|  | 1861 | continue; | 
|  | 1862 | } | 
|  | 1863 |  | 
|  | 1864 | /* lock is for another node. */ | 
|  | 1865 | newlock = dlm_new_lock(ml->type, ml->node, | 
|  | 1866 | be64_to_cpu(ml->cookie), NULL); | 
|  | 1867 | if (!newlock) { | 
|  | 1868 | ret = -ENOMEM; | 
|  | 1869 | goto leave; | 
|  | 1870 | } | 
|  | 1871 | lksb = newlock->lksb; | 
|  | 1872 | dlm_lock_attach_lockres(newlock, res); | 
|  | 1873 |  | 
|  | 1874 | if (ml->convert_type != LKM_IVMODE) { | 
|  | 1875 | BUG_ON(queue != &res->converting); | 
|  | 1876 | newlock->ml.convert_type = ml->convert_type; | 
|  | 1877 | } | 
|  | 1878 | lksb->flags |= (ml->flags & | 
|  | 1879 | (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB)); | 
| Kurt Hackel | ccd8b1f | 2006-05-01 11:32:14 -0700 | [diff] [blame] | 1880 |  | 
|  | 1881 | if (ml->type == LKM_NLMODE) | 
|  | 1882 | goto skip_lvb; | 
|  | 1883 |  | 
| Kurt Hackel | 8bc674c | 2006-04-27 18:02:10 -0700 | [diff] [blame] | 1884 | if (!dlm_lvb_is_empty(mres->lvb)) { | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1885 | if (lksb->flags & DLM_LKSB_PUT_LVB) { | 
|  | 1886 | /* other node was trying to update | 
|  | 1887 | * lvb when node died.  recreate the | 
|  | 1888 | * lksb with the updated lvb. */ | 
|  | 1889 | memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN); | 
| Kurt Hackel | ccd8b1f | 2006-05-01 11:32:14 -0700 | [diff] [blame] | 1890 | /* the lock resource lvb update must happen | 
|  | 1891 | * NOW, before the spinlock is dropped. | 
|  | 1892 | * we no longer wait for the AST to update | 
|  | 1893 | * the lvb. */ | 
|  | 1894 | memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1895 | } else { | 
| Sunil Mushran | 2bd6321 | 2010-01-25 16:57:38 -0800 | [diff] [blame] | 1896 | /* otherwise, the node is sending its | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1897 | * most recent valid lvb info */ | 
|  | 1898 | BUG_ON(ml->type != LKM_EXMODE && | 
|  | 1899 | ml->type != LKM_PRMODE); | 
| Kurt Hackel | 8bc674c | 2006-04-27 18:02:10 -0700 | [diff] [blame] | 1900 | if (!dlm_lvb_is_empty(res->lvb) && | 
| Kurt Hackel | ccd8b1f | 2006-05-01 11:32:14 -0700 | [diff] [blame] | 1901 | (ml->type == LKM_EXMODE || | 
|  | 1902 | memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) { | 
|  | 1903 | int i; | 
|  | 1904 | mlog(ML_ERROR, "%s:%.*s: received bad " | 
|  | 1905 | "lvb! type=%d\n", dlm->name, | 
|  | 1906 | res->lockname.len, | 
|  | 1907 | res->lockname.name, ml->type); | 
|  | 1908 | printk("lockres lvb=["); | 
|  | 1909 | for (i=0; i<DLM_LVB_LEN; i++) | 
|  | 1910 | printk("%02x", res->lvb[i]); | 
|  | 1911 | printk("]\nmigrated lvb=["); | 
|  | 1912 | for (i=0; i<DLM_LVB_LEN; i++) | 
|  | 1913 | printk("%02x", mres->lvb[i]); | 
|  | 1914 | printk("]\n"); | 
|  | 1915 | dlm_print_one_lock_resource(res); | 
|  | 1916 | BUG(); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1917 | } | 
|  | 1918 | memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); | 
|  | 1919 | } | 
|  | 1920 | } | 
| Kurt Hackel | ccd8b1f | 2006-05-01 11:32:14 -0700 | [diff] [blame] | 1921 | skip_lvb: | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1922 |  | 
|  | 1923 | /* NOTE: | 
|  | 1924 | * wrt lock queue ordering and recovery: | 
|  | 1925 | *    1. order of locks on granted queue is | 
|  | 1926 | *       meaningless. | 
|  | 1927 | *    2. order of locks on converting queue is | 
|  | 1928 | *       LOST with the node death.  sorry charlie. | 
|  | 1929 | *    3. order of locks on the blocked queue is | 
|  | 1930 | *       also LOST. | 
|  | 1931 | * order of locks does not affect integrity, it | 
|  | 1932 | * just means that a lock request may get pushed | 
|  | 1933 | * back in line as a result of the node death. | 
|  | 1934 | * also note that for a given node the lock order | 
|  | 1935 | * for its secondary queue locks is preserved | 
|  | 1936 | * relative to each other, but clearly *not* | 
|  | 1937 | * preserved relative to locks from other nodes. | 
|  | 1938 | */ | 
| Kurt Hackel | c3187ce | 2006-04-27 18:05:41 -0700 | [diff] [blame] | 1939 | bad = 0; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1940 | spin_lock(&res->spinlock); | 
| Kurt Hackel | c3187ce | 2006-04-27 18:05:41 -0700 | [diff] [blame] | 1941 | list_for_each_entry(lock, queue, list) { | 
|  | 1942 | if (lock->ml.cookie == ml->cookie) { | 
| Sunil Mushran | 26636bf | 2010-01-25 16:57:40 -0800 | [diff] [blame] | 1943 | c = lock->ml.cookie; | 
| Kurt Hackel | c3187ce | 2006-04-27 18:05:41 -0700 | [diff] [blame] | 1944 | mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already " | 
|  | 1945 | "exists on this lockres!\n", dlm->name, | 
|  | 1946 | res->lockname.len, res->lockname.name, | 
| Kurt Hackel | 74aa258 | 2007-01-17 15:11:36 -0800 | [diff] [blame] | 1947 | dlm_get_lock_cookie_node(be64_to_cpu(c)), | 
|  | 1948 | dlm_get_lock_cookie_seq(be64_to_cpu(c))); | 
| Kurt Hackel | c3187ce | 2006-04-27 18:05:41 -0700 | [diff] [blame] | 1949 |  | 
|  | 1950 | mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, " | 
|  | 1951 | "node=%u, cookie=%u:%llu, queue=%d\n", | 
|  | 1952 | ml->type, ml->convert_type, ml->node, | 
| Kurt Hackel | 74aa258 | 2007-01-17 15:11:36 -0800 | [diff] [blame] | 1953 | dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)), | 
|  | 1954 | dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)), | 
| Kurt Hackel | c3187ce | 2006-04-27 18:05:41 -0700 | [diff] [blame] | 1955 | ml->list); | 
|  | 1956 |  | 
|  | 1957 | __dlm_print_one_lock_resource(res); | 
|  | 1958 | bad = 1; | 
|  | 1959 | break; | 
|  | 1960 | } | 
|  | 1961 | } | 
|  | 1962 | if (!bad) { | 
|  | 1963 | dlm_lock_get(newlock); | 
|  | 1964 | list_add_tail(&newlock->list, queue); | 
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 1965 | mlog(0, "%s:%.*s: added lock for node %u, " | 
|  | 1966 | "setting refmap bit\n", dlm->name, | 
|  | 1967 | res->lockname.len, res->lockname.name, ml->node); | 
|  | 1968 | dlm_lockres_set_refmap_bit(ml->node, res); | 
|  | 1969 | added++; | 
| Kurt Hackel | c3187ce | 2006-04-27 18:05:41 -0700 | [diff] [blame] | 1970 | } | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1971 | spin_unlock(&res->spinlock); | 
|  | 1972 | } | 
|  | 1973 | mlog(0, "done running all the locks\n"); | 
|  | 1974 |  | 
|  | 1975 | leave: | 
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 1976 | /* balance the ref taken when the work was queued */ | 
| Kurt Hackel | 50635f1 | 2007-01-17 14:54:39 -0800 | [diff] [blame] | 1977 | spin_lock(&res->spinlock); | 
|  | 1978 | dlm_lockres_drop_inflight_ref(dlm, res); | 
|  | 1979 | spin_unlock(&res->spinlock); | 
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 1980 |  | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1981 | if (ret < 0) { | 
|  | 1982 | mlog_errno(ret); | 
|  | 1983 | if (newlock) | 
|  | 1984 | dlm_lock_put(newlock); | 
|  | 1985 | } | 
|  | 1986 |  | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1987 | return ret; | 
|  | 1988 | } | 
|  | 1989 |  | 
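|  |  | /* Mark a lockres as RECOVERING and put it on dlm->reco.resources, taking | 
|  |  | * a reference for the list.  Any convert, lock, unlock or cancel that | 
|  |  | * was in flight against the dead master is resolved here so the lock | 
|  |  | * state sent to the new master is consistent. */ | 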
|  | 1990 | void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm, | 
|  | 1991 | struct dlm_lock_resource *res) | 
|  | 1992 | { | 
|  | 1993 | int i; | 
| Christoph Hellwig | 800deef | 2007-05-17 16:03:13 +0200 | [diff] [blame] | 1994 | struct list_head *queue; | 
|  | 1995 | struct dlm_lock *lock, *next; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1996 |  | 
| Wengang Wang | a524812 | 2010-07-30 16:14:44 +0800 | [diff] [blame] | 1997 | assert_spin_locked(&dlm->spinlock); | 
|  | 1998 | assert_spin_locked(&res->spinlock); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1999 | res->state |= DLM_LOCK_RES_RECOVERING; | 
| Kurt Hackel | 69d72b0 | 2006-05-01 10:57:51 -0700 | [diff] [blame] | 2000 | if (!list_empty(&res->recovering)) { | 
|  | 2001 | mlog(0, | 
|  | 2002 | "Recovering res %s:%.*s, is already on recovery list!\n", | 
|  | 2003 | dlm->name, res->lockname.len, res->lockname.name); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2004 | list_del_init(&res->recovering); | 
| Sunil Mushran | 52987e2 | 2008-03-01 14:04:21 -0800 | [diff] [blame] | 2005 | dlm_lockres_put(res); | 
| Kurt Hackel | 69d72b0 | 2006-05-01 10:57:51 -0700 | [diff] [blame] | 2006 | } | 
|  | 2007 | /* We need to hold a reference while on the recovery list */ | 
|  | 2008 | dlm_lockres_get(res); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2009 | list_add_tail(&res->recovering, &dlm->reco.resources); | 
|  | 2010 |  | 
|  | 2011 | /* find any pending locks and put them back on proper list */ | 
|  | 2012 | for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) { | 
|  | 2013 | queue = dlm_list_idx_to_ptr(res, i); | 
| Christoph Hellwig | 800deef | 2007-05-17 16:03:13 +0200 | [diff] [blame] | 2014 | list_for_each_entry_safe(lock, next, queue, list) { | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2015 | dlm_lock_get(lock); | 
|  | 2016 | if (lock->convert_pending) { | 
|  | 2017 | /* move converting lock back to granted */ | 
|  | 2018 | BUG_ON(i != DLM_CONVERTING_LIST); | 
|  | 2019 | mlog(0, "node died with convert pending " | 
|  | 2020 | "on %.*s. move back to granted list.\n", | 
|  | 2021 | res->lockname.len, res->lockname.name); | 
|  | 2022 | dlm_revert_pending_convert(res, lock); | 
|  | 2023 | lock->convert_pending = 0; | 
|  | 2024 | } else if (lock->lock_pending) { | 
|  | 2025 | /* remove pending lock requests completely */ | 
|  | 2026 | BUG_ON(i != DLM_BLOCKED_LIST); | 
|  | 2027 | mlog(0, "node died with lock pending " | 
|  | 2028 | "on %.*s. remove from blocked list and skip.\n", | 
|  | 2029 | res->lockname.len, res->lockname.name); | 
|  | 2030 | /* lock will be floating until ref in | 
|  | 2031 | * dlmlock_remote is freed after the network | 
|  | 2032 | * call returns.  ok for it to not be on any | 
|  | 2033 | * list since no ast can be called | 
|  | 2034 | * (the master is dead). */ | 
|  | 2035 | dlm_revert_pending_lock(res, lock); | 
|  | 2036 | lock->lock_pending = 0; | 
|  | 2037 | } else if (lock->unlock_pending) { | 
|  | 2038 | /* if an unlock was in progress, treat as | 
|  | 2039 | * if this had completed successfully | 
|  | 2040 | * before sending this lock state to the | 
|  | 2041 | * new master.  note that the dlm_unlock | 
|  | 2042 | * call is still responsible for calling | 
|  | 2043 | * the unlockast.  that will happen after | 
|  | 2044 | * the network call times out.  for now, | 
|  | 2045 | * just move lists to prepare the new | 
|  | 2046 | * recovery master.  */ | 
|  | 2047 | BUG_ON(i != DLM_GRANTED_LIST); | 
|  | 2048 | mlog(0, "node died with unlock pending " | 
|  | 2049 | "on %.*s. remove from blocked list and skip.\n", | 
|  | 2050 | res->lockname.len, res->lockname.name); | 
|  | 2051 | dlm_commit_pending_unlock(res, lock); | 
|  | 2052 | lock->unlock_pending = 0; | 
|  | 2053 | } else if (lock->cancel_pending) { | 
|  | 2054 | /* if a cancel was in progress, treat as | 
|  | 2055 | * if this had completed successfully | 
|  | 2056 | * before sending this lock state to the | 
|  | 2057 | * new master */ | 
|  | 2058 | BUG_ON(i != DLM_CONVERTING_LIST); | 
|  | 2059 | mlog(0, "node died with cancel pending " | 
|  | 2060 | "on %.*s. move back to granted list.\n", | 
|  | 2061 | res->lockname.len, res->lockname.name); | 
|  | 2062 | dlm_commit_pending_cancel(res, lock); | 
|  | 2063 | lock->cancel_pending = 0; | 
|  | 2064 | } | 
|  | 2065 | dlm_lock_put(lock); | 
|  | 2066 | } | 
|  | 2067 | } | 
|  | 2068 | } | 
|  | 2069 |  | 
|  | 2070 |  | 
|  | 2071 |  | 
|  | 2072 | /* removes all recovered locks from the recovery list. | 
|  | 2073 | * sets the res->owner to the new master. | 
|  | 2074 | * unsets the RECOVERY flag and wakes waiters. */ | 
|  | 2075 | static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, | 
|  | 2076 | u8 dead_node, u8 new_master) | 
|  | 2077 | { | 
|  | 2078 | int i; | 
| Mark Fasheh | 81f2094 | 2006-02-28 17:31:22 -0800 | [diff] [blame] | 2079 | struct hlist_node *hash_iter; | 
|  | 2080 | struct hlist_head *bucket; | 
| Christoph Hellwig | 800deef | 2007-05-17 16:03:13 +0200 | [diff] [blame] | 2081 | struct dlm_lock_resource *res, *next; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2082 |  | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2083 | assert_spin_locked(&dlm->spinlock); | 
|  | 2084 |  | 
| Christoph Hellwig | 800deef | 2007-05-17 16:03:13 +0200 | [diff] [blame] | 2085 | list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) { | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2086 | if (res->owner == dead_node) { | 
|  | 2087 | list_del_init(&res->recovering); | 
|  | 2088 | spin_lock(&res->spinlock); | 
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 2089 | /* new_master has our reference from | 
|  | 2090 | * the lock state sent during recovery */ | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2091 | dlm_change_lockres_owner(dlm, res, new_master); | 
|  | 2092 | res->state &= ~DLM_LOCK_RES_RECOVERING; | 
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 2093 | if (__dlm_lockres_has_locks(res)) | 
| Kurt Hackel | 69d72b0 | 2006-05-01 10:57:51 -0700 | [diff] [blame] | 2094 | __dlm_dirty_lockres(dlm, res); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2095 | spin_unlock(&res->spinlock); | 
|  | 2096 | wake_up(&res->wq); | 
| Kurt Hackel | 69d72b0 | 2006-05-01 10:57:51 -0700 | [diff] [blame] | 2097 | dlm_lockres_put(res); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2098 | } | 
|  | 2099 | } | 
|  | 2100 |  | 
|  | 2101 | /* this will become unnecessary eventually, but | 
|  | 2102 | * for now we need to run the whole hash, clear | 
|  | 2103 | * the RECOVERING state and set the owner | 
|  | 2104 | * if necessary */ | 
| Mark Fasheh | 81f2094 | 2006-02-28 17:31:22 -0800 | [diff] [blame] | 2105 | for (i = 0; i < DLM_HASH_BUCKETS; i++) { | 
| Daniel Phillips | 03d864c | 2006-03-10 18:08:16 -0800 | [diff] [blame] | 2106 | bucket = dlm_lockres_hash(dlm, i); | 
| Mark Fasheh | 81f2094 | 2006-02-28 17:31:22 -0800 | [diff] [blame] | 2107 | hlist_for_each_entry(res, hash_iter, bucket, hash_node) { | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2108 | if (res->state & DLM_LOCK_RES_RECOVERING) { | 
|  | 2109 | if (res->owner == dead_node) { | 
|  | 2110 | mlog(0, "(this=%u) res %.*s owner=%u " | 
|  | 2111 | "was not on recovering list, but " | 
|  | 2112 | "clearing state anyway\n", | 
|  | 2113 | dlm->node_num, res->lockname.len, | 
|  | 2114 | res->lockname.name, new_master); | 
|  | 2115 | } else if (res->owner == dlm->node_num) { | 
|  | 2116 | mlog(0, "(this=%u) res %.*s owner=%u " | 
|  | 2117 | "was not on recovering list, " | 
|  | 2118 | "owner is THIS node, clearing\n", | 
|  | 2119 | dlm->node_num, res->lockname.len, | 
|  | 2120 | res->lockname.name, new_master); | 
|  | 2121 | } else | 
|  | 2122 | continue; | 
|  | 2123 |  | 
| Kurt Hackel | c03872f | 2006-03-06 14:08:49 -0800 | [diff] [blame] | 2124 | if (!list_empty(&res->recovering)) { | 
|  | 2125 | mlog(0, "%s:%.*s: lockres was " | 
|  | 2126 | "marked RECOVERING, owner=%u\n", | 
|  | 2127 | dlm->name, res->lockname.len, | 
|  | 2128 | res->lockname.name, res->owner); | 
|  | 2129 | list_del_init(&res->recovering); | 
| Kurt Hackel | 69d72b0 | 2006-05-01 10:57:51 -0700 | [diff] [blame] | 2130 | dlm_lockres_put(res); | 
| Kurt Hackel | c03872f | 2006-03-06 14:08:49 -0800 | [diff] [blame] | 2131 | } | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2132 | spin_lock(&res->spinlock); | 
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 2133 | /* new_master has our reference from | 
|  | 2134 | * the lock state sent during recovery */ | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2135 | dlm_change_lockres_owner(dlm, res, new_master); | 
|  | 2136 | res->state &= ~DLM_LOCK_RES_RECOVERING; | 
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 2137 | if (__dlm_lockres_has_locks(res)) | 
| Kurt Hackel | 69d72b0 | 2006-05-01 10:57:51 -0700 | [diff] [blame] | 2138 | __dlm_dirty_lockres(dlm, res); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2139 | spin_unlock(&res->spinlock); | 
|  | 2140 | wake_up(&res->wq); | 
|  | 2141 | } | 
|  | 2142 | } | 
|  | 2143 | } | 
|  | 2144 | } | 
|  | 2145 |  | 
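|  |  | /* Decide whether a lock's lvb can still be trusted now that a node | 
|  |  | * has died.  For locks held by this node (local != 0), anything below | 
|  |  | * PR means the cached lvb may be stale; for the dead node's locks on | 
|  |  | * a lockres mastered here, only an EX could have changed the lvb. */ | 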
|  | 2146 | static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local) | 
|  | 2147 | { | 
|  | 2148 | if (local) { | 
|  | 2149 | if (lock->ml.type != LKM_EXMODE && | 
|  | 2150 | lock->ml.type != LKM_PRMODE) | 
|  | 2151 | return 1; | 
|  | 2152 | } else if (lock->ml.type == LKM_EXMODE) | 
|  | 2153 | return 1; | 
|  | 2154 | return 0; | 
|  | 2155 | } | 
|  | 2156 |  | 
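|  |  | /* Blank out the lockres lvb (and any matching lksb lvbs) when the | 
|  |  | * death of dead_node means the value block can no longer be trusted. | 
|  |  | * Caller must hold dlm->spinlock and res->spinlock. */ | 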
|  | 2157 | static void dlm_revalidate_lvb(struct dlm_ctxt *dlm, | 
|  | 2158 | struct dlm_lock_resource *res, u8 dead_node) | 
|  | 2159 | { | 
| Christoph Hellwig | 800deef | 2007-05-17 16:03:13 +0200 | [diff] [blame] | 2160 | struct list_head *queue; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2161 | struct dlm_lock *lock; | 
|  | 2162 | int blank_lvb = 0, local = 0; | 
|  | 2163 | int i; | 
|  | 2164 | u8 search_node; | 
|  | 2165 |  | 
|  | 2166 | assert_spin_locked(&dlm->spinlock); | 
|  | 2167 | assert_spin_locked(&res->spinlock); | 
|  | 2168 |  | 
|  | 2169 | if (res->owner == dlm->node_num) | 
| Sunil Mushran | 2bd6321 | 2010-01-25 16:57:38 -0800 | [diff] [blame] | 2170 | /* if this node owned the lockres, and if the dead node | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2171 | * had an EX when he died, blank out the lvb */ | 
|  | 2172 | search_node = dead_node; | 
|  | 2173 | else { | 
|  | 2174 | /* if this is a secondary lockres, and we had no EX or PR | 
|  | 2175 | * locks granted, we can no longer trust the lvb */ | 
|  | 2176 | search_node = dlm->node_num; | 
|  | 2177 | local = 1;  /* check local state for valid lvb */ | 
|  | 2178 | } | 
|  | 2179 |  | 
|  | 2180 | for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) { | 
|  | 2181 | queue = dlm_list_idx_to_ptr(res, i); | 
| Christoph Hellwig | 800deef | 2007-05-17 16:03:13 +0200 | [diff] [blame] | 2182 | list_for_each_entry(lock, queue, list) { | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2183 | if (lock->ml.node == search_node) { | 
|  | 2184 | if (dlm_lvb_needs_invalidation(lock, local)) { | 
|  | 2185 | /* zero the lksb lvb and lockres lvb */ | 
|  | 2186 | blank_lvb = 1; | 
|  | 2187 | memset(lock->lksb->lvb, 0, DLM_LVB_LEN); | 
|  | 2188 | } | 
|  | 2189 | } | 
|  | 2190 | } | 
|  | 2191 | } | 
|  | 2192 |  | 
|  | 2193 | if (blank_lvb) { | 
|  | 2194 | mlog(0, "clearing %.*s lvb, dead node %u had EX\n", | 
|  | 2195 | res->lockname.len, res->lockname.name, dead_node); | 
|  | 2196 | memset(res->lvb, 0, DLM_LVB_LEN); | 
|  | 2197 | } | 
|  | 2198 | } | 
|  | 2199 |  | 
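|  |  | /* Strip dead_node's locks from all three queues of a lockres mastered | 
|  |  | * here and drop its refmap bit.  Caller holds dlm->spinlock and | 
|  |  | * res->spinlock; the lockres is marked dirty but the dlm thread is | 
|  |  | * not kicked yet. */ | 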
|  | 2200 | static void dlm_free_dead_locks(struct dlm_ctxt *dlm, | 
|  | 2201 | struct dlm_lock_resource *res, u8 dead_node) | 
|  | 2202 | { | 
| Christoph Hellwig | 800deef | 2007-05-17 16:03:13 +0200 | [diff] [blame] | 2203 | struct dlm_lock *lock, *next; | 
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 2204 | unsigned int freed = 0; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2205 |  | 
|  | 2206 | /* this node is the lockres master: | 
|  | 2207 | * 1) remove any stale locks for the dead node | 
| Sunil Mushran | 2bd6321 | 2010-01-25 16:57:38 -0800 | [diff] [blame] | 2208 | * 2) if the dead node had an EX when he died, blank out the lvb | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2209 | */ | 
|  | 2210 | assert_spin_locked(&dlm->spinlock); | 
|  | 2211 | assert_spin_locked(&res->spinlock); | 
|  | 2212 |  | 
| Sunil Mushran | 2c5c54a | 2008-03-01 14:04:20 -0800 | [diff] [blame] | 2213 | /* We do two dlm_lock_put()s: one for removing the lock from its list, | 
|  | 2214 | * the other to force the DLM_UNLOCK_FREE_LOCK action so the lock is freed */ | 
|  | 2215 |  | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2216 | /* TODO: check pending_asts, pending_basts here */ | 
| Christoph Hellwig | 800deef | 2007-05-17 16:03:13 +0200 | [diff] [blame] | 2217 | list_for_each_entry_safe(lock, next, &res->granted, list) { | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2218 | if (lock->ml.node == dead_node) { | 
|  | 2219 | list_del_init(&lock->list); | 
|  | 2220 | dlm_lock_put(lock); | 
| Sunil Mushran | 2c5c54a | 2008-03-01 14:04:20 -0800 | [diff] [blame] | 2221 | /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */ | 
|  | 2222 | dlm_lock_put(lock); | 
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 2223 | freed++; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2224 | } | 
|  | 2225 | } | 
| Christoph Hellwig | 800deef | 2007-05-17 16:03:13 +0200 | [diff] [blame] | 2226 | list_for_each_entry_safe(lock, next, &res->converting, list) { | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2227 | if (lock->ml.node == dead_node) { | 
|  | 2228 | list_del_init(&lock->list); | 
|  | 2229 | dlm_lock_put(lock); | 
| Sunil Mushran | 2c5c54a | 2008-03-01 14:04:20 -0800 | [diff] [blame] | 2230 | /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */ | 
|  | 2231 | dlm_lock_put(lock); | 
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 2232 | freed++; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2233 | } | 
|  | 2234 | } | 
| Christoph Hellwig | 800deef | 2007-05-17 16:03:13 +0200 | [diff] [blame] | 2235 | list_for_each_entry_safe(lock, next, &res->blocked, list) { | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2236 | if (lock->ml.node == dead_node) { | 
|  | 2237 | list_del_init(&lock->list); | 
|  | 2238 | dlm_lock_put(lock); | 
| Sunil Mushran | 2c5c54a | 2008-03-01 14:04:20 -0800 | [diff] [blame] | 2239 | /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */ | 
|  | 2240 | dlm_lock_put(lock); | 
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 2241 | freed++; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2242 | } | 
|  | 2243 | } | 
|  | 2244 |  | 
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 2245 | if (freed) { | 
|  | 2246 | mlog(0, "%s:%.*s: freed %u locks for dead node %u, " | 
|  | 2247 | "dropping ref from lockres\n", dlm->name, | 
|  | 2248 | res->lockname.len, res->lockname.name, freed, dead_node); | 
| Sunil Mushran | cda70ba | 2010-02-01 17:34:58 -0800 | [diff] [blame] | 2249 | if (!test_bit(dead_node, res->refmap)) { | 
|  | 2250 | mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, " | 
|  | 2251 | "but ref was not set\n", dlm->name, | 
|  | 2252 | res->lockname.len, res->lockname.name, freed, dead_node); | 
|  | 2253 | __dlm_print_one_lock_resource(res); | 
|  | 2254 | } | 
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 2255 | dlm_lockres_clear_refmap_bit(dead_node, res); | 
|  | 2256 | } else if (test_bit(dead_node, res->refmap)) { | 
|  | 2257 | mlog(0, "%s:%.*s: dead node %u had a ref, but had " | 
|  | 2258 | "no locks and had not purged before dying\n", dlm->name, | 
|  | 2259 | res->lockname.len, res->lockname.name, dead_node); | 
|  | 2260 | dlm_lockres_clear_refmap_bit(dead_node, res); | 
|  | 2261 | } | 
|  | 2262 |  | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2263 | /* do not kick thread yet */ | 
|  | 2264 | __dlm_dirty_lockres(dlm, res); | 
|  | 2265 | } | 
|  | 2266 |  | 
|  | 2267 | /* if this node is the recovery master, and there are no locks | 
|  | 2268 | * for a given lockres owned by this node in either PR or EX | 
|  | 2269 | * mode, zero out the lvb before requesting the lock state. | 
|  | 2270 | * | 
|  | 2271 | */ | 
|  | 2272 |  | 
|  | 2273 |  | 
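|  |  | /* Walk the entire lockres hash and clean up after dead_node: purge its | 
|  |  | * stale mles and any $RECOVERY lock it held, revalidate lvbs, move | 
|  |  | * lockreses it mastered onto the recovering list, and free its locks | 
|  |  | * on lockreses mastered locally. */ | 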
|  | 2274 | static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) | 
|  | 2275 | { | 
| Mark Fasheh | 81f2094 | 2006-02-28 17:31:22 -0800 | [diff] [blame] | 2276 | struct hlist_node *iter; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2277 | struct dlm_lock_resource *res; | 
|  | 2278 | int i; | 
| Mark Fasheh | 81f2094 | 2006-02-28 17:31:22 -0800 | [diff] [blame] | 2279 | struct hlist_head *bucket; | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 2280 | struct dlm_lock *lock; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2281 |  | 
|  | 2282 |  | 
|  | 2283 | /* purge any stale mles */ | 
|  | 2284 | dlm_clean_master_list(dlm, dead_node); | 
|  | 2285 |  | 
|  | 2286 | /* | 
|  | 2287 | * now clean up all lock resources.  there are two rules: | 
|  | 2288 | * | 
|  | 2289 | * 1) if the dead node was the master, move the lockres | 
|  | 2290 | *    to the recovering list.  set the RECOVERING flag. | 
|  | 2291 | *    this lockres needs to be cleaned up before it can | 
|  | 2292 | *    be used further. | 
|  | 2293 | * | 
|  | 2294 | * 2) if this node was the master, remove all locks from | 
|  | 2295 | *    each of the lockres queues that were owned by the | 
|  | 2296 | *    dead node.  once recovery finishes, the dlm thread | 
|  | 2297 | *    can be kicked again to see if any ASTs or BASTs | 
|  | 2298 | *    need to be fired as a result. | 
|  | 2299 | */ | 
| Mark Fasheh | 81f2094 | 2006-02-28 17:31:22 -0800 | [diff] [blame] | 2300 | for (i = 0; i < DLM_HASH_BUCKETS; i++) { | 
| Daniel Phillips | 03d864c | 2006-03-10 18:08:16 -0800 | [diff] [blame] | 2301 | bucket = dlm_lockres_hash(dlm, i); | 
| Mark Fasheh | 81f2094 | 2006-02-28 17:31:22 -0800 | [diff] [blame] | 2302 | hlist_for_each_entry(res, iter, bucket, hash_node) { | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 2303 | /* always prune any $RECOVERY entries for dead nodes, | 
|  | 2304 | * otherwise hangs can occur during later recovery */ | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2305 | if (dlm_is_recovery_lock(res->lockname.name, | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 2306 | res->lockname.len)) { | 
|  | 2307 | spin_lock(&res->spinlock); | 
|  | 2308 | list_for_each_entry(lock, &res->granted, list) { | 
|  | 2309 | if (lock->ml.node == dead_node) { | 
|  | 2310 | mlog(0, "AHA! there was " | 
|  | 2311 | "a $RECOVERY lock for dead " | 
|  | 2312 | "node %u (%s)!\n", | 
|  | 2313 | dead_node, dlm->name); | 
|  | 2314 | list_del_init(&lock->list); | 
|  | 2315 | dlm_lock_put(lock); | 
|  | 2316 | break; | 
|  | 2317 | } | 
|  | 2318 | } | 
|  | 2319 | spin_unlock(&res->spinlock); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2320 | continue; | 
| Sunil Mushran | 2bd6321 | 2010-01-25 16:57:38 -0800 | [diff] [blame] | 2321 | } | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2322 | spin_lock(&res->spinlock); | 
|  | 2323 | /* zero the lvb if necessary */ | 
|  | 2324 | dlm_revalidate_lvb(dlm, res, dead_node); | 
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 2325 | if (res->owner == dead_node) { | 
| Wengang Wang | a524812 | 2010-07-30 16:14:44 +0800 | [diff] [blame] | 2326 | if (res->state & DLM_LOCK_RES_DROPPING_REF) { | 
|  | 2327 | mlog(ML_NOTICE, "Ignore %.*s for " | 
|  | 2328 | "recovery as it is being freed\n", | 
|  | 2329 | res->lockname.len, | 
|  | 2330 | res->lockname.name); | 
|  | 2331 | } else | 
|  | 2332 | dlm_move_lockres_to_recovery_list(dlm, | 
|  | 2333 | res); | 
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 2334 |  | 
| Kurt Hackel | ba2bf21 | 2006-12-01 14:47:20 -0800 | [diff] [blame] | 2335 | } else if (res->owner == dlm->node_num) { | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2336 | dlm_free_dead_locks(dlm, res, dead_node); | 
|  | 2337 | __dlm_lockres_calc_usage(dlm, res); | 
|  | 2338 | } | 
|  | 2339 | spin_unlock(&res->spinlock); | 
|  | 2340 | } | 
|  | 2341 | } | 
|  | 2342 |  | 
|  | 2343 | } | 
|  | 2344 |  | 
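|  |  | /* Called with dlm->spinlock held once a node is known to be down. | 
|  |  | * Resets recovery state if the dead node was the recovery master and | 
|  |  | * finalize1 had been reached, clears the node's join/live/domain | 
|  |  | * state, runs local cleanup and marks it for recovery. */ | 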
|  | 2345 | static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx) | 
|  | 2346 | { | 
|  | 2347 | assert_spin_locked(&dlm->spinlock); | 
|  | 2348 |  | 
| Kurt Hackel | 466d1a4 | 2006-05-01 11:11:13 -0700 | [diff] [blame] | 2349 | if (dlm->reco.new_master == idx) { | 
|  | 2350 | mlog(0, "%s: recovery master %d just died\n", | 
|  | 2351 | dlm->name, idx); | 
|  | 2352 | if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { | 
|  | 2353 | /* finalize1 was reached, so it is safe to clear | 
|  | 2354 | * the new_master and dead_node.  that recovery | 
|  | 2355 | * is complete. */ | 
|  | 2356 | mlog(0, "%s: dead master %d had reached " | 
|  | 2357 | "finalize1 state, clearing\n", dlm->name, idx); | 
|  | 2358 | dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE; | 
|  | 2359 | __dlm_reset_recovery(dlm); | 
|  | 2360 | } | 
|  | 2361 | } | 
|  | 2362 |  | 
| Tao Ma | 2d4b1cb | 2008-01-10 15:20:55 +0800 | [diff] [blame] | 2363 | /* Clean up join state on node death. */ | 
|  | 2364 | if (dlm->joining_node == idx) { | 
|  | 2365 | mlog(0, "Clearing join state for node %u\n", idx); | 
|  | 2366 | __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); | 
|  | 2367 | } | 
|  | 2368 |  | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2369 | /* check to see if the node is already considered dead */ | 
|  | 2370 | if (!test_bit(idx, dlm->live_nodes_map)) { | 
|  | 2371 | mlog(0, "for domain %s, node %d is already dead. " | 
|  | 2372 | "another node likely did recovery already.\n", | 
|  | 2373 | dlm->name, idx); | 
|  | 2374 | return; | 
|  | 2375 | } | 
|  | 2376 |  | 
|  | 2377 | /* check to see if we do not care about this node */ | 
|  | 2378 | if (!test_bit(idx, dlm->domain_map)) { | 
|  | 2379 | /* This also catches the case that we get a node down | 
|  | 2380 | * but haven't joined the domain yet. */ | 
|  | 2381 | mlog(0, "node %u already removed from domain!\n", idx); | 
|  | 2382 | return; | 
|  | 2383 | } | 
|  | 2384 |  | 
|  | 2385 | clear_bit(idx, dlm->live_nodes_map); | 
|  | 2386 |  | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2387 | /* make sure local cleanup occurs before the heartbeat events */ | 
|  | 2388 | if (!test_bit(idx, dlm->recovery_map)) | 
|  | 2389 | dlm_do_local_recovery_cleanup(dlm, idx); | 
|  | 2390 |  | 
|  | 2391 | /* notify anything attached to the heartbeat events */ | 
|  | 2392 | dlm_hb_event_notify_attached(dlm, idx, 0); | 
|  | 2393 |  | 
|  | 2394 | mlog(0, "node %u being removed from domain map!\n", idx); | 
|  | 2395 | clear_bit(idx, dlm->domain_map); | 
|  | 2396 | /* wake up migration waiters if a node goes down. | 
|  | 2397 | * perhaps later we can genericize this for other waiters. */ | 
|  | 2398 | wake_up(&dlm->migration_wq); | 
|  | 2399 |  | 
|  | 2400 | if (test_bit(idx, dlm->recovery_map)) | 
|  | 2401 | mlog(0, "domain %s, node %u already added " | 
|  | 2402 | "to recovery map!\n", dlm->name, idx); | 
|  | 2403 | else | 
|  | 2404 | set_bit(idx, dlm->recovery_map); | 
|  | 2405 | } | 
|  | 2406 |  | 
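|  |  | /* Heartbeat callback for a node that stopped heartbeating: fire domain | 
|  |  | * eviction callbacks if it was in our domain, then do the node-down | 
|  |  | * work under dlm->spinlock. */ | 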
|  | 2407 | void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data) | 
|  | 2408 | { | 
|  | 2409 | struct dlm_ctxt *dlm = data; | 
|  | 2410 |  | 
|  | 2411 | if (!dlm_grab(dlm)) | 
|  | 2412 | return; | 
|  | 2413 |  | 
| Mark Fasheh | 6561168 | 2007-09-07 11:11:10 -0700 | [diff] [blame] | 2414 | /* | 
|  | 2415 | * This will notify any dlm users that a node in our domain | 
|  | 2416 | * went away without notifying us first. | 
|  | 2417 | */ | 
|  | 2418 | if (test_bit(idx, dlm->domain_map)) | 
|  | 2419 | dlm_fire_domain_eviction_callbacks(dlm, idx); | 
|  | 2420 |  | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2421 | spin_lock(&dlm->spinlock); | 
|  | 2422 | __dlm_hb_node_down(dlm, idx); | 
|  | 2423 | spin_unlock(&dlm->spinlock); | 
|  | 2424 |  | 
|  | 2425 | dlm_put(dlm); | 
|  | 2426 | } | 
|  | 2427 |  | 
|  | 2428 | void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data) | 
|  | 2429 | { | 
|  | 2430 | struct dlm_ctxt *dlm = data; | 
|  | 2431 |  | 
|  | 2432 | if (!dlm_grab(dlm)) | 
|  | 2433 | return; | 
|  | 2434 |  | 
|  | 2435 | spin_lock(&dlm->spinlock); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2436 | set_bit(idx, dlm->live_nodes_map); | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 2437 | /* do NOT notify mle attached to the heartbeat events. | 
|  | 2438 | * new nodes are not interested in mastery until joined. */ | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2439 | spin_unlock(&dlm->spinlock); | 
|  | 2440 |  | 
|  | 2441 | dlm_put(dlm); | 
|  | 2442 | } | 
|  | 2443 |  | 
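|  |  | /* The $RECOVERY lock is used purely as a cluster-wide mutex to elect a | 
|  |  | * recovery master, so its ast/bast/unlock-ast callbacks only log. */ | 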
|  | 2444 | static void dlm_reco_ast(void *astdata) | 
|  | 2445 | { | 
|  | 2446 | struct dlm_ctxt *dlm = astdata; | 
|  | 2447 | mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n", | 
|  | 2448 | dlm->node_num, dlm->name); | 
|  | 2449 | } | 
|  | 2450 | static void dlm_reco_bast(void *astdata, int blocked_type) | 
|  | 2451 | { | 
|  | 2452 | struct dlm_ctxt *dlm = astdata; | 
|  | 2453 | mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n", | 
|  | 2454 | dlm->node_num, dlm->name); | 
|  | 2455 | } | 
|  | 2456 | static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st) | 
|  | 2457 | { | 
|  | 2458 | mlog(0, "unlockast for recovery lock fired!\n"); | 
|  | 2459 | } | 
|  | 2460 |  | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 2461 | /* | 
|  | 2462 | * dlm_pick_recovery_master will continually attempt to use | 
|  | 2463 | * dlmlock() on the special "$RECOVERY" lockres with the | 
|  | 2464 | * LKM_NOQUEUE flag to get an EX.  every thread that enters | 
|  | 2465 | * this function on each node racing to become the recovery | 
|  | 2466 | * master will not stop attempting this until either: | 
|  | 2467 | * a) this node gets the EX (and becomes the recovery master), | 
| Sunil Mushran | 2bd6321 | 2010-01-25 16:57:38 -0800 | [diff] [blame] | 2468 | * or b) dlm->reco.new_master gets set to some nodenum | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 2469 | * != O2NM_INVALID_NODE_NUM (another node will do the reco). | 
|  | 2470 | * so each time a recovery master is needed, the entire cluster | 
|  | 2471 | * will sync at this point.  if the new master dies, that will | 
|  | 2472 | * be detected in dlm_do_recovery */ | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2473 | static int dlm_pick_recovery_master(struct dlm_ctxt *dlm) | 
|  | 2474 | { | 
|  | 2475 | enum dlm_status ret; | 
|  | 2476 | struct dlm_lockstatus lksb; | 
|  | 2477 | int status = -EINVAL; | 
|  | 2478 |  | 
|  | 2479 | mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n", | 
|  | 2480 | dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num); | 
| Sunil Mushran | 2bd6321 | 2010-01-25 16:57:38 -0800 | [diff] [blame] | 2481 | again: | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2482 | memset(&lksb, 0, sizeof(lksb)); | 
|  | 2483 |  | 
|  | 2484 | ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY, | 
| Mark Fasheh | 3384f3d | 2006-09-08 11:38:29 -0700 | [diff] [blame] | 2485 | DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN, | 
|  | 2486 | dlm_reco_ast, dlm, dlm_reco_bast); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2487 |  | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 2488 | mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n", | 
|  | 2489 | dlm->name, ret, lksb.status); | 
|  | 2490 |  | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2491 | if (ret == DLM_NORMAL) { | 
|  | 2492 | mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n", | 
|  | 2493 | dlm->name, dlm->node_num); | 
| Sunil Mushran | 2bd6321 | 2010-01-25 16:57:38 -0800 | [diff] [blame] | 2494 |  | 
|  | 2495 | /* got the EX lock.  check to see if another node | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 2496 | * just became the reco master */ | 
|  | 2497 | if (dlm_reco_master_ready(dlm)) { | 
|  | 2498 | mlog(0, "%s: got reco EX lock, but %u will " | 
|  | 2499 | "do the recovery\n", dlm->name, | 
|  | 2500 | dlm->reco.new_master); | 
|  | 2501 | status = -EEXIST; | 
|  | 2502 | } else { | 
| Kurt Hackel | 898effa | 2006-01-18 17:01:25 -0800 | [diff] [blame] | 2503 | status = 0; | 
|  | 2504 |  | 
|  | 2505 | /* see if recovery was already finished elsewhere */ | 
|  | 2506 | spin_lock(&dlm->spinlock); | 
|  | 2507 | if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { | 
| Sunil Mushran | 2bd6321 | 2010-01-25 16:57:38 -0800 | [diff] [blame] | 2508 | status = -EINVAL; | 
| Kurt Hackel | 898effa | 2006-01-18 17:01:25 -0800 | [diff] [blame] | 2509 | mlog(0, "%s: got reco EX lock, but " | 
|  | 2510 | "node got recovered already\n", dlm->name); | 
|  | 2511 | if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { | 
|  | 2512 | mlog(ML_ERROR, "%s: new master is %u " | 
| Sunil Mushran | 2bd6321 | 2010-01-25 16:57:38 -0800 | [diff] [blame] | 2513 | "but no dead node!\n", | 
| Kurt Hackel | 898effa | 2006-01-18 17:01:25 -0800 | [diff] [blame] | 2514 | dlm->name, dlm->reco.new_master); | 
|  | 2515 | BUG(); | 
|  | 2516 | } | 
|  | 2517 | } | 
|  | 2518 | spin_unlock(&dlm->spinlock); | 
|  | 2519 | } | 
|  | 2520 |  | 
|  | 2521 | /* if this node has actually become the recovery master, | 
|  | 2522 | * set the master and send the messages to begin recovery */ | 
|  | 2523 | if (!status) { | 
|  | 2524 | mlog(0, "%s: dead=%u, this=%u, sending " | 
| Sunil Mushran | 2bd6321 | 2010-01-25 16:57:38 -0800 | [diff] [blame] | 2525 | "begin_reco now\n", dlm->name, | 
| Kurt Hackel | 898effa | 2006-01-18 17:01:25 -0800 | [diff] [blame] | 2526 | dlm->reco.dead_node, dlm->node_num); | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 2527 | status = dlm_send_begin_reco_message(dlm, | 
|  | 2528 | dlm->reco.dead_node); | 
|  | 2529 | /* this always succeeds */ | 
|  | 2530 | BUG_ON(status); | 
|  | 2531 |  | 
|  | 2532 | /* set the new_master to this node */ | 
|  | 2533 | spin_lock(&dlm->spinlock); | 
| Kurt Hackel | ab27eb6 | 2006-04-27 18:03:49 -0700 | [diff] [blame] | 2534 | dlm_set_reco_master(dlm, dlm->node_num); | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 2535 | spin_unlock(&dlm->spinlock); | 
|  | 2536 | } | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2537 |  | 
|  | 2538 | /* recovery lock is a special case.  ast will not get fired, | 
|  | 2539 | * so just go ahead and unlock it. */ | 
|  | 2540 | ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm); | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 2541 | if (ret == DLM_DENIED) { | 
|  | 2542 | mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n"); | 
|  | 2543 | ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm); | 
|  | 2544 | } | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2545 | if (ret != DLM_NORMAL) { | 
|  | 2546 | /* this would really suck. this could only happen | 
|  | 2547 | * if there was a network error during the unlock | 
|  | 2548 | * because of node death.  this means the unlock | 
|  | 2549 | * is actually "done" and the lock structure is | 
|  | 2550 | * even freed.  we can continue, but only | 
|  | 2551 | * because this specific lock name is special. */ | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 2552 | mlog(ML_ERROR, "dlmunlock returned %d\n", ret); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2553 | } | 
|  | 2554 | } else if (ret == DLM_NOTQUEUED) { | 
|  | 2555 | mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n", | 
|  | 2556 | dlm->name, dlm->node_num); | 
|  | 2557 | /* another node is master. wait on | 
| Sunil Mushran | 2bd6321 | 2010-01-25 16:57:38 -0800 | [diff] [blame] | 2558 | * reco.new_master != O2NM_INVALID_NODE_NUM | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 2559 | * for at most one second */ | 
|  | 2560 | wait_event_timeout(dlm->dlm_reco_thread_wq, | 
|  | 2561 | dlm_reco_master_ready(dlm), | 
|  | 2562 | msecs_to_jiffies(1000)); | 
|  | 2563 | if (!dlm_reco_master_ready(dlm)) { | 
|  | 2564 | mlog(0, "%s: reco master taking awhile\n", | 
|  | 2565 | dlm->name); | 
|  | 2566 | goto again; | 
|  | 2567 | } | 
|  | 2568 | /* another node has informed this one that it is reco master */ | 
|  | 2569 | mlog(0, "%s: reco master %u is ready to recover %u\n", | 
|  | 2570 | dlm->name, dlm->reco.new_master, dlm->reco.dead_node); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2571 | status = -EEXIST; | 
| Kurt Hackel | c8df412 | 2006-05-01 13:47:50 -0700 | [diff] [blame] | 2572 | } else if (ret == DLM_RECOVERING) { | 
|  | 2573 | mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n", | 
|  | 2574 | dlm->name, dlm->node_num); | 
|  | 2575 | goto again; | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 2576 | } else { | 
|  | 2577 | struct dlm_lock_resource *res; | 
|  | 2578 |  | 
|  | 2579 | /* dlmlock returned something other than NOTQUEUED or NORMAL */ | 
|  | 2580 | mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), " | 
|  | 2581 | "lksb.status=%s\n", dlm->name, dlm_errname(ret), | 
|  | 2582 | dlm_errname(lksb.status)); | 
|  | 2583 | res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME, | 
|  | 2584 | DLM_RECOVERY_LOCK_NAME_LEN); | 
|  | 2585 | if (res) { | 
|  | 2586 | dlm_print_one_lock_resource(res); | 
|  | 2587 | dlm_lockres_put(res); | 
|  | 2588 | } else { | 
|  | 2589 | mlog(ML_ERROR, "recovery lock not found\n"); | 
|  | 2590 | } | 
|  | 2591 | BUG(); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2592 | } | 
|  | 2593 |  | 
|  | 2594 | return status; | 
|  | 2595 | } | 
|  | 2596 |  | 
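|  |  | /* Broadcast DLM_BEGIN_RECO_MSG to every other live node in the domain. | 
|  |  | * Nodes that have gone down are skipped; a node still finishing the | 
|  |  | * previous recovery replies EAGAIN (or -EAGAIN) and is retried after a | 
|  |  | * short sleep, as are transient send failures. */ | 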
|  | 2597 | static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node) | 
|  | 2598 | { | 
|  | 2599 | struct dlm_begin_reco br; | 
|  | 2600 | int ret = 0; | 
|  | 2601 | struct dlm_node_iter iter; | 
|  | 2602 | int nodenum; | 
|  | 2603 | int status; | 
|  | 2604 |  | 
| Kurt Hackel | d6dea6e | 2006-04-27 18:08:51 -0700 | [diff] [blame] | 2605 | mlog(0, "%s: dead node is %u\n", dlm->name, dead_node); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2606 |  | 
|  | 2607 | spin_lock(&dlm->spinlock); | 
|  | 2608 | dlm_node_iter_init(dlm->domain_map, &iter); | 
|  | 2609 | spin_unlock(&dlm->spinlock); | 
|  | 2610 |  | 
|  | 2611 | clear_bit(dead_node, iter.node_map); | 
|  | 2612 |  | 
|  | 2613 | memset(&br, 0, sizeof(br)); | 
|  | 2614 | br.node_idx = dlm->node_num; | 
|  | 2615 | br.dead_node = dead_node; | 
|  | 2616 |  | 
|  | 2617 | while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { | 
|  | 2618 | ret = 0; | 
|  | 2619 | if (nodenum == dead_node) { | 
|  | 2620 | mlog(0, "not sending begin reco to dead node " | 
|  | 2621 | "%u\n", dead_node); | 
|  | 2622 | continue; | 
|  | 2623 | } | 
|  | 2624 | if (nodenum == dlm->node_num) { | 
|  | 2625 | mlog(0, "not sending begin reco to self\n"); | 
|  | 2626 | continue; | 
|  | 2627 | } | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 2628 | retry: | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2629 | ret = -EINVAL; | 
|  | 2630 | mlog(0, "attempting to send begin reco msg to %d\n", | 
|  | 2631 | nodenum); | 
|  | 2632 | ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key, | 
|  | 2633 | &br, sizeof(br), nodenum, &status); | 
|  | 2634 | /* negative status is handled ok by caller here */ | 
|  | 2635 | if (ret >= 0) | 
|  | 2636 | ret = status; | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 2637 | if (dlm_is_host_down(ret)) { | 
|  | 2638 | /* node is down.  not involved in recovery | 
|  | 2639 | * so just keep going */ | 
| Wengang Wang | a5196ec | 2010-03-30 12:09:22 +0800 | [diff] [blame] | 2640 | mlog(ML_NOTICE, "%s: node %u was down when sending " | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 2641 | "begin reco msg (%d)\n", dlm->name, nodenum, ret); | 
|  | 2642 | ret = 0; | 
|  | 2643 | } | 
| Sunil Mushran | cd34edd | 2010-01-25 17:58:30 -0800 | [diff] [blame] | 2644 |  | 
|  | 2645 | /* | 
|  | 2646 | * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8, | 
|  | 2647 | * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN. | 
|  | 2648 | * We are handling both for compatibility reasons. | 
|  | 2649 | */ | 
|  | 2650 | if (ret == -EAGAIN || ret == EAGAIN) { | 
| Tiger Yang | aad1b15 | 2009-11-19 10:17:46 +0800 | [diff] [blame] | 2651 | mlog(0, "%s: trying to start recovery of node " | 
|  | 2652 | "%u, but node %u is waiting for last recovery " | 
|  | 2653 | "to complete, back off for a bit\n", dlm->name, | 
|  | 2654 | dead_node, nodenum); | 
|  | 2655 | msleep(100); | 
|  | 2656 | goto retry; | 
|  | 2657 | } | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2658 | if (ret < 0) { | 
|  | 2659 | struct dlm_lock_resource *res; | 
| Wengang Wang | a5196ec | 2010-03-30 12:09:22 +0800 | [diff] [blame] | 2660 |  | 
| Sunil Mushran | 2bd6321 | 2010-01-25 16:57:38 -0800 | [diff] [blame] | 2661 | /* this is now a serious problem, possibly ENOMEM | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 2662 | * in the network stack.  must retry */ | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2663 | mlog_errno(ret); | 
|  | 2664 | mlog(ML_ERROR, "begin reco of dlm %s to node %u " | 
| Wengang Wang | a5196ec | 2010-03-30 12:09:22 +0800 | [diff] [blame] | 2665 | "returned %d\n", dlm->name, nodenum, ret); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2666 | res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME, | 
|  | 2667 | DLM_RECOVERY_LOCK_NAME_LEN); | 
|  | 2668 | if (res) { | 
|  | 2669 | dlm_print_one_lock_resource(res); | 
|  | 2670 | dlm_lockres_put(res); | 
|  | 2671 | } else { | 
|  | 2672 | mlog(ML_ERROR, "recovery lock not found\n"); | 
|  | 2673 | } | 
| Sunil Mushran | 2bd6321 | 2010-01-25 16:57:38 -0800 | [diff] [blame] | 2674 | /* sleep for a bit in hopes that we can avoid | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 2675 | * another ENOMEM */ | 
|  | 2676 | msleep(100); | 
|  | 2677 | goto retry; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2678 | } | 
|  | 2679 | } | 
|  | 2680 |  | 
|  | 2681 | return ret; | 
|  | 2682 | } | 
|  | 2683 |  | 
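|  |  | /* Handler for DLM_BEGIN_RECO_MSG.  Returns -EAGAIN while this node is | 
|  |  | * still finalizing a previous recovery; otherwise it records the | 
|  |  | * sender as the new recovery master and the advertised dead node, | 
|  |  | * forces a local node-down if heartbeat has not yet seen the death, | 
|  |  | * and kicks the recovery thread. */ | 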
| Kurt Hackel | d74c980 | 2007-01-17 17:04:25 -0800 | [diff] [blame] | 2684 | int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data, | 
|  | 2685 | void **ret_data) | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2686 | { | 
|  | 2687 | struct dlm_ctxt *dlm = data; | 
|  | 2688 | struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf; | 
|  | 2689 |  | 
|  | 2690 | /* ok to return 0, domain has gone away */ | 
|  | 2691 | if (!dlm_grab(dlm)) | 
|  | 2692 | return 0; | 
|  | 2693 |  | 
| Kurt Hackel | 466d1a4 | 2006-05-01 11:11:13 -0700 | [diff] [blame] | 2694 | spin_lock(&dlm->spinlock); | 
|  | 2695 | if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { | 
|  | 2696 | mlog(0, "%s: node %u wants to recover node %u (%u:%u) " | 
|  | 2697 | "but this node is in finalize state, waiting on finalize2\n", | 
|  | 2698 | dlm->name, br->node_idx, br->dead_node, | 
|  | 2699 | dlm->reco.dead_node, dlm->reco.new_master); | 
|  | 2700 | spin_unlock(&dlm->spinlock); | 
| Tiger Yang | aad1b15 | 2009-11-19 10:17:46 +0800 | [diff] [blame] | 2701 | return -EAGAIN; | 
| Kurt Hackel | 466d1a4 | 2006-05-01 11:11:13 -0700 | [diff] [blame] | 2702 | } | 
|  | 2703 | spin_unlock(&dlm->spinlock); | 
|  | 2704 |  | 
| Kurt Hackel | d6dea6e | 2006-04-27 18:08:51 -0700 | [diff] [blame] | 2705 | mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n", | 
|  | 2706 | dlm->name, br->node_idx, br->dead_node, | 
|  | 2707 | dlm->reco.dead_node, dlm->reco.new_master); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2708 |  | 
|  | 2709 | dlm_fire_domain_eviction_callbacks(dlm, br->dead_node); | 
|  | 2710 |  | 
|  | 2711 | spin_lock(&dlm->spinlock); | 
|  | 2712 | if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 2713 | if (test_bit(dlm->reco.new_master, dlm->recovery_map)) { | 
|  | 2714 | mlog(0, "%s: new_master %u died, changing " | 
|  | 2715 | "to %u\n", dlm->name, dlm->reco.new_master, | 
|  | 2716 | br->node_idx); | 
|  | 2717 | } else { | 
|  | 2718 | mlog(0, "%s: new_master %u NOT DEAD, changing " | 
|  | 2719 | "to %u\n", dlm->name, dlm->reco.new_master, | 
|  | 2720 | br->node_idx); | 
|  | 2721 | /* may not have seen the new master as dead yet */ | 
|  | 2722 | } | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2723 | } | 
|  | 2724 | if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) { | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 2725 | mlog(ML_NOTICE, "%s: dead_node previously set to %u, " | 
| Sunil Mushran | 2bd6321 | 2010-01-25 16:57:38 -0800 | [diff] [blame] | 2726 | "node %u changing it to %u\n", dlm->name, | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 2727 | dlm->reco.dead_node, br->node_idx, br->dead_node); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2728 | } | 
| Kurt Hackel | ab27eb6 | 2006-04-27 18:03:49 -0700 | [diff] [blame] | 2729 | dlm_set_reco_master(dlm, br->node_idx); | 
|  | 2730 | dlm_set_reco_dead_node(dlm, br->dead_node); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2731 | if (!test_bit(br->dead_node, dlm->recovery_map)) { | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 2732 | mlog(0, "recovery master %u sees %u as dead, but this " | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2733 | "node has not yet.  marking %u as dead\n", | 
|  | 2734 | br->node_idx, br->dead_node, br->dead_node); | 
| Kurt Hackel | e2faea4 | 2006-01-12 14:24:55 -0800 | [diff] [blame] | 2735 | if (!test_bit(br->dead_node, dlm->domain_map) || | 
|  | 2736 | !test_bit(br->dead_node, dlm->live_nodes_map)) | 
|  | 2737 | mlog(0, "%u not in domain/live_nodes map " | 
|  | 2738 | "so setting it in reco map manually\n", | 
|  | 2739 | br->dead_node); | 
| Kurt Hackel | c03872f | 2006-03-06 14:08:49 -0800 | [diff] [blame] | 2740 | /* force the recovery cleanup in __dlm_hb_node_down | 
|  | 2741 | * both of these will be cleared in a moment */ | 
|  | 2742 | set_bit(br->dead_node, dlm->domain_map); | 
|  | 2743 | set_bit(br->dead_node, dlm->live_nodes_map); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2744 | __dlm_hb_node_down(dlm, br->dead_node); | 
|  | 2745 | } | 
|  | 2746 | spin_unlock(&dlm->spinlock); | 
|  | 2747 |  | 
|  | 2748 | dlm_kick_recovery_thread(dlm); | 
| Kurt Hackel | d6dea6e | 2006-04-27 18:08:51 -0700 | [diff] [blame] | 2749 |  | 
|  | 2750 | mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n", | 
|  | 2751 | dlm->name, br->node_idx, br->dead_node, | 
|  | 2752 | dlm->reco.dead_node, dlm->reco.new_master); | 
|  | 2753 |  | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2754 | dlm_put(dlm); | 
|  | 2755 | return 0; | 
|  | 2756 | } | 
|  | 2757 |  | 
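|  |  | /* Recovery is finalized in two rounds: finalize1 has every node hand | 
|  |  | * ownership of the recovered lockreses to the new master and enter the | 
|  |  | * FINALIZE state; finalize2 (flagged with DLM_FINALIZE_STAGE2) lets | 
|  |  | * them clear that state and reset recovery completely. */ | 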
| Kurt Hackel | 466d1a4 | 2006-05-01 11:11:13 -0700 | [diff] [blame] | 2758 | #define DLM_FINALIZE_STAGE2  0x01 | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2759 | static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm) | 
|  | 2760 | { | 
|  | 2761 | int ret = 0; | 
|  | 2762 | struct dlm_finalize_reco fr; | 
|  | 2763 | struct dlm_node_iter iter; | 
|  | 2764 | int nodenum; | 
|  | 2765 | int status; | 
| Kurt Hackel | 466d1a4 | 2006-05-01 11:11:13 -0700 | [diff] [blame] | 2766 | int stage = 1; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2767 |  | 
| Kurt Hackel | 466d1a4 | 2006-05-01 11:11:13 -0700 | [diff] [blame] | 2768 | mlog(0, "finishing recovery for node %s:%u, " | 
|  | 2769 | "stage %d\n", dlm->name, dlm->reco.dead_node, stage); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2770 |  | 
|  | 2771 | spin_lock(&dlm->spinlock); | 
|  | 2772 | dlm_node_iter_init(dlm->domain_map, &iter); | 
|  | 2773 | spin_unlock(&dlm->spinlock); | 
|  | 2774 |  | 
| Kurt Hackel | 466d1a4 | 2006-05-01 11:11:13 -0700 | [diff] [blame] | 2775 | stage2: | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2776 | memset(&fr, 0, sizeof(fr)); | 
|  | 2777 | fr.node_idx = dlm->node_num; | 
|  | 2778 | fr.dead_node = dlm->reco.dead_node; | 
| Kurt Hackel | 466d1a4 | 2006-05-01 11:11:13 -0700 | [diff] [blame] | 2779 | if (stage == 2) | 
|  | 2780 | fr.flags |= DLM_FINALIZE_STAGE2; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2781 |  | 
|  | 2782 | while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { | 
|  | 2783 | if (nodenum == dlm->node_num) | 
|  | 2784 | continue; | 
|  | 2785 | ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key, | 
|  | 2786 | &fr, sizeof(fr), nodenum, &status); | 
| Kurt Hackel | 466d1a4 | 2006-05-01 11:11:13 -0700 | [diff] [blame] | 2787 | if (ret >= 0) | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2788 | ret = status; | 
| Kurt Hackel | 466d1a4 | 2006-05-01 11:11:13 -0700 | [diff] [blame] | 2789 | if (ret < 0) { | 
| Wengang Wang | a5196ec | 2010-03-30 12:09:22 +0800 | [diff] [blame] | 2790 | mlog(ML_ERROR, "Error %d when sending message %u (key " | 
|  | 2791 | "0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG, | 
|  | 2792 | dlm->key, nodenum); | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2793 | if (dlm_is_host_down(ret)) { | 
| Sunil Mushran | 2bd6321 | 2010-01-25 16:57:38 -0800 | [diff] [blame] | 2794 | /* this has no effect on this recovery | 
|  | 2795 | * session, so set the status to zero to | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2796 | * finish out the last recovery */ | 
|  | 2797 | mlog(ML_ERROR, "node %u went down after this " | 
|  | 2798 | "node finished recovery.\n", nodenum); | 
|  | 2799 | ret = 0; | 
| Kurt Hackel | c27069e | 2006-05-01 13:51:49 -0700 | [diff] [blame] | 2800 | continue; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2801 | } | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2802 | break; | 
|  | 2803 | } | 
|  | 2804 | } | 
| Kurt Hackel | 466d1a4 | 2006-05-01 11:11:13 -0700 | [diff] [blame] | 2805 | if (stage == 1) { | 
|  | 2806 | /* reset the node_iter back to the top and send finalize2 */ | 
|  | 2807 | iter.curnode = -1; | 
|  | 2808 | stage = 2; | 
|  | 2809 | goto stage2; | 
|  | 2810 | } | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2811 |  | 
|  | 2812 | return ret; | 
|  | 2813 | } | 
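|  |  | /* Handler for DLM_FINALIZE_RECO_MSG.  Verifies that the sender really | 
|  |  | * is the expected recovery master for the expected dead node, then | 
|  |  | * runs the stage 1 or stage 2 finalize work described above. */ | 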
|  | 2814 |  | 
| Kurt Hackel | d74c980 | 2007-01-17 17:04:25 -0800 | [diff] [blame] | 2815 | int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data, | 
|  | 2816 | void **ret_data) | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2817 | { | 
|  | 2818 | struct dlm_ctxt *dlm = data; | 
|  | 2819 | struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf; | 
| Kurt Hackel | 466d1a4 | 2006-05-01 11:11:13 -0700 | [diff] [blame] | 2820 | int stage = 1; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2821 |  | 
|  | 2822 | /* ok to return 0, domain has gone away */ | 
|  | 2823 | if (!dlm_grab(dlm)) | 
|  | 2824 | return 0; | 
|  | 2825 |  | 
| Kurt Hackel | 466d1a4 | 2006-05-01 11:11:13 -0700 | [diff] [blame] | 2826 | if (fr->flags & DLM_FINALIZE_STAGE2) | 
|  | 2827 | stage = 2; | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2828 |  | 
| Kurt Hackel | 466d1a4 | 2006-05-01 11:11:13 -0700 | [diff] [blame] | 2829 | mlog(0, "%s: node %u finalizing recovery stage%d of " | 
|  | 2830 | "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage, | 
|  | 2831 | fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master); | 
| Sunil Mushran | 2bd6321 | 2010-01-25 16:57:38 -0800 | [diff] [blame] | 2832 |  | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2833 | spin_lock(&dlm->spinlock); | 
|  | 2834 |  | 
|  | 2835 | if (dlm->reco.new_master != fr->node_idx) { | 
|  | 2836 | mlog(ML_ERROR, "node %u sent recovery finalize msg, but node " | 
|  | 2837 | "%u is supposed to be the new master, dead=%u\n", | 
|  | 2838 | fr->node_idx, dlm->reco.new_master, fr->dead_node); | 
|  | 2839 | BUG(); | 
|  | 2840 | } | 
|  | 2841 | if (dlm->reco.dead_node != fr->dead_node) { | 
|  | 2842 | mlog(ML_ERROR, "node %u sent recovery finalize msg for dead " | 
|  | 2843 | "node %u, but node %u is supposed to be dead\n", | 
|  | 2844 | fr->node_idx, fr->dead_node, dlm->reco.dead_node); | 
|  | 2845 | BUG(); | 
|  | 2846 | } | 
|  | 2847 |  | 
| Kurt Hackel | 466d1a4 | 2006-05-01 11:11:13 -0700 | [diff] [blame] | 2848 | switch (stage) { | 
|  | 2849 | case 1: | 
|  | 2850 | dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx); | 
|  | 2851 | if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { | 
|  | 2852 | mlog(ML_ERROR, "%s: received finalize1 from " | 
|  | 2853 | "new master %u for dead node %u, but " | 
|  | 2854 | "this node has already received it!\n", | 
|  | 2855 | dlm->name, fr->node_idx, fr->dead_node); | 
|  | 2856 | dlm_print_reco_node_status(dlm); | 
|  | 2857 | BUG(); | 
|  | 2858 | } | 
|  | 2859 | dlm->reco.state |= DLM_RECO_STATE_FINALIZE; | 
|  | 2860 | spin_unlock(&dlm->spinlock); | 
|  | 2861 | break; | 
|  | 2862 | case 2: | 
|  | 2863 | if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) { | 
|  | 2864 | mlog(ML_ERROR, "%s: received finalize2 from " | 
|  | 2865 | "new master %u for dead node %u, but " | 
|  | 2866 | "this node did not have finalize1!\n", | 
|  | 2867 | dlm->name, fr->node_idx, fr->dead_node); | 
|  | 2868 | dlm_print_reco_node_status(dlm); | 
|  | 2869 | BUG(); | 
|  | 2870 | } | 
|  | 2871 | dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE; | 
|  | 2872 | spin_unlock(&dlm->spinlock); | 
|  | 2873 | dlm_reset_recovery(dlm); | 
|  | 2874 | dlm_kick_recovery_thread(dlm); | 
|  | 2875 | break; | 
|  | 2876 | default: | 
|  | 2877 | BUG(); | 
|  | 2878 | } | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2879 |  | 
| Kurt Hackel | d6dea6e | 2006-04-27 18:08:51 -0700 | [diff] [blame] | 2880 | mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n", | 
|  | 2881 | dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master); | 
|  | 2882 |  | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 2883 | dlm_put(dlm); | 
|  | 2884 | return 0; | 
|  | 2885 | } |