| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1 | /* -*- mode: c; c-basic-offset: 8; -*- | 
|  | 2 | * vim: noexpandtab sw=8 ts=8 sts=0: | 
|  | 3 | * | 
|  | 4 | * dlmcommon.h | 
|  | 5 | * | 
|  | 6 | * Copyright (C) 2004 Oracle.  All rights reserved. | 
|  | 7 | * | 
|  | 8 | * This program is free software; you can redistribute it and/or | 
|  | 9 | * modify it under the terms of the GNU General Public | 
|  | 10 | * License as published by the Free Software Foundation; either | 
|  | 11 | * version 2 of the License, or (at your option) any later version. | 
|  | 12 | * | 
|  | 13 | * This program is distributed in the hope that it will be useful, | 
|  | 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 
|  | 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU | 
|  | 16 | * General Public License for more details. | 
|  | 17 | * | 
|  | 18 | * You should have received a copy of the GNU General Public | 
|  | 19 | * License along with this program; if not, write to the | 
|  | 20 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | 
|  | 21 | * Boston, MA 021110-1307, USA. | 
|  | 22 | * | 
|  | 23 | */ | 
|  | 24 |  | 
|  | 25 | #ifndef DLMCOMMON_H | 
|  | 26 | #define DLMCOMMON_H | 
|  | 27 |  | 
|  | 28 | #include <linux/kref.h> | 
|  | 29 |  | 
/* Heartbeat callback priorities for this module's o2hb callbacks;
 * presumably these order dlm callbacks relative to other o2hb users —
 * confirm against cluster/heartbeat.h. */
#define DLM_HB_NODE_DOWN_PRI     (0xf000000)
#define DLM_HB_NODE_UP_PRI       (0x8000000)

/* Max bytes in a lock resource name (see mname[] in the mle below). */
#define DLM_LOCKID_NAME_MAX    32

#define DLM_DOMAIN_NAME_MAX_LEN    255
/* Sentinel owner: one past the highest valid node number. */
#define DLM_LOCK_RES_OWNER_UNKNOWN     O2NM_MAX_NODES
#define DLM_THREAD_SHUFFLE_INTERVAL    5     // flush everything every 5 passes
#define DLM_THREAD_MS                  200   // flush at least every 200 ms

/* The lockres hash is an array of pages of hlist_heads totalling
 * DLM_HASH_SIZE_DEFAULT bytes; dlm_lockres_hash() below maps a bucket
 * index onto this paged layout. */
#define DLM_HASH_SIZE_DEFAULT	(1 << 17)
#if DLM_HASH_SIZE_DEFAULT < PAGE_SIZE
# define DLM_HASH_PAGES		1
#else
# define DLM_HASH_PAGES		(DLM_HASH_SIZE_DEFAULT / PAGE_SIZE)
#endif
#define DLM_BUCKETS_PER_PAGE	(PAGE_SIZE / sizeof(struct hlist_head))
#define DLM_HASH_BUCKETS	(DLM_HASH_PAGES * DLM_BUCKETS_PER_PAGE)

/* Intended to make it easier for us to switch out hash functions */
#define dlm_lockid_hash(_n, _l) full_name_hash(_n, _l)
|  | 51 |  | 
/* Roles a master list entry (mle) plays during mastery negotiation.
 * DLM_MLE_NUM_TYPES is a count used to size the stat arrays in
 * struct dlm_ctxt, not a real type. */
enum dlm_mle_type {
	DLM_MLE_BLOCK,
	DLM_MLE_MASTER,
	DLM_MLE_MIGRATION,
	DLM_MLE_NUM_TYPES
};

/* Tracks an in-flight mastery (or migration) negotiation for one lock
 * resource.  Hashed by name via dlm->master_hash (see dlm_master_hash()
 * below); refcounted through mle_refs. */
struct dlm_master_list_entry {
	struct hlist_node master_hash_node;	/* link in dlm->master_hash */
	struct list_head hb_events;	/* link in dlm->mle_hb_events, presumably */
	struct dlm_ctxt *dlm;		/* owning domain */
	spinlock_t spinlock;
	wait_queue_head_t wq;
	atomic_t woken;
	struct kref mle_refs;		/* reference count */
	int inuse;
	/* per-node bitmaps used while gathering mastery responses */
	unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	u8 master;			/* current master node number */
	u8 new_master;			/* migration target node */
	enum dlm_mle_type type;
	struct o2hb_callback_func mle_hb_up;
	struct o2hb_callback_func mle_hb_down;
	struct dlm_lock_resource *mleres;
	unsigned char mname[DLM_LOCKID_NAME_MAX];	/* lockres name copy */
	unsigned int mnamelen;
	unsigned int mnamehash;		/* cached dlm_lockid_hash of mname */
};

/* Kinds of asynchronous notification delivered for a lock. */
enum dlm_ast_type {
	DLM_AST = 0,
	DLM_BAST,
	DLM_ASTUNLOCK
};
|  | 88 |  | 
|  | 89 |  | 
#define LKM_VALID_FLAGS (LKM_VALBLK | LKM_CONVERT | LKM_UNLOCK | \
			 LKM_CANCEL | LKM_INVVALBLK | LKM_FORCE | \
			 LKM_RECOVERY | LKM_LOCAL | LKM_NOQUEUE)

#define DLM_RECOVERY_LOCK_NAME       "$RECOVERY"
#define DLM_RECOVERY_LOCK_NAME_LEN   9

/* Return 1 iff (lock_name, name_len) names the special per-domain
 * "$RECOVERY" lock resource, 0 otherwise. */
static inline int dlm_is_recovery_lock(const char *lock_name, int name_len)
{
	return (name_len == DLM_RECOVERY_LOCK_NAME_LEN &&
		!memcmp(lock_name, DLM_RECOVERY_LOCK_NAME, name_len));
}
|  | 104 |  | 
/* dlm_recovery_ctxt.state flag bits */
#define DLM_RECO_STATE_ACTIVE    0x0001
#define DLM_RECO_STATE_FINALIZE  0x0002

/* Per-domain bookkeeping for recovering locks owned by a dead node. */
struct dlm_recovery_ctxt
{
	struct list_head resources;	/* lockres still needing recovery */
	struct list_head received;
	struct list_head node_data;	/* list of struct dlm_reco_node_data */
	u8  new_master;			/* node mastering this recovery */
	u8  dead_node;			/* node whose locks are recovered */
	u16 state;			/* DLM_RECO_STATE_* bits */
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	wait_queue_head_t event;
};

/* Domain lifecycle; per the comment in struct dlm_ctxt, dlm_state is
 * protected by the global dlm_domain_lock. */
enum dlm_ctxt_state {
	DLM_CTXT_NEW = 0,
	DLM_CTXT_JOINED,
	DLM_CTXT_IN_SHUTDOWN,
	DLM_CTXT_LEAVING,
};

/* One dlm domain: all state for this node's participation in a named
 * lock space shared across the cluster. */
struct dlm_ctxt
{
	struct list_head list;		/* link in the global domain list */
	struct hlist_head **lockres_hash; /* paged hash, see dlm_lockres_hash() */
	struct list_head dirty_list;	/* lockres with pending local work */
	struct list_head purge_list;	/* unused lockres awaiting purge */
	struct list_head pending_asts;
	struct list_head pending_basts;
	struct list_head tracking_list;	/* all lockres, for debugfs tracking */
	unsigned int purge_count;
	spinlock_t spinlock;
	spinlock_t ast_lock;		/* protects the two pending_* lists */
	spinlock_t track_lock;		/* protects tracking_list */
	char *name;			/* domain name */
	u8 node_num;			/* this node's number */
	u32 key;
	u8  joining_node;		/* set via __dlm_set_joining_node() */
	wait_queue_head_t dlm_join_events;
	/* per-node membership bitmaps */
	unsigned long live_nodes_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long recovery_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	struct dlm_recovery_ctxt reco;
	spinlock_t master_lock;
	struct hlist_head **master_hash; /* paged mle hash, see dlm_master_hash() */
	struct list_head mle_hb_events;

	/* these give a really vague idea of the system load */
	atomic_t mle_tot_count[DLM_MLE_NUM_TYPES];
	atomic_t mle_cur_count[DLM_MLE_NUM_TYPES];
	atomic_t res_tot_count;
	atomic_t res_cur_count;

	struct dlm_debug_ctxt *dlm_debug_ctxt;
	struct dentry *dlm_debugfs_subroot;

	/* NOTE: Next three are protected by dlm_domain_lock */
	struct kref dlm_refs;
	enum dlm_ctxt_state dlm_state;
	unsigned int num_joins;

	struct o2hb_callback_func dlm_hb_up;
	struct o2hb_callback_func dlm_hb_down;
	struct task_struct *dlm_thread_task;
	struct task_struct *dlm_reco_thread_task;
	struct workqueue_struct *dlm_worker;
	wait_queue_head_t dlm_thread_wq;
	wait_queue_head_t dlm_reco_thread_wq;
	wait_queue_head_t ast_wq;
	wait_queue_head_t migration_wq;

	struct work_struct dispatched_work;
	struct list_head work_list;	/* dlm_work_item queue ... */
	spinlock_t work_lock;		/* ... protected by this lock */
	struct list_head dlm_domain_handlers;
	struct list_head	dlm_eviction_callbacks;

	/* The filesystem specifies this at domain registration.  We
	 * cache it here to know what to tell other nodes. */
	struct dlm_protocol_version fs_locking_proto;
	/* This is the inter-dlm communication version */
	struct dlm_protocol_version dlm_locking_proto;
};
|  | 189 |  | 
| Daniel Phillips | 03d864c | 2006-03-10 18:08:16 -0800 | [diff] [blame] | 190 | static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned i) | 
|  | 191 | { | 
|  | 192 | return dlm->lockres_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] + (i % DLM_BUCKETS_PER_PAGE); | 
|  | 193 | } | 
|  | 194 |  | 
| Sunil Mushran | e2b66dd | 2009-02-26 15:00:40 -0800 | [diff] [blame] | 195 | static inline struct hlist_head *dlm_master_hash(struct dlm_ctxt *dlm, | 
|  | 196 | unsigned i) | 
|  | 197 | { | 
|  | 198 | return dlm->master_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] + | 
|  | 199 | (i % DLM_BUCKETS_PER_PAGE); | 
|  | 200 | } | 
|  | 201 |  | 
/* these keventd work queue items are for less-frequently
 * called functions that cannot be directly called from the
 * net message handlers for some reason, usually because
 * they need to send net messages of their own. */
void dlm_dispatch_work(struct work_struct *work);

struct dlm_lock_resource;
struct dlm_work_item;

/* Deferred-work callback; second argument is the item's opaque data. */
typedef void (dlm_workfunc_t)(struct dlm_work_item *, void *);

/* Payload for a deferred "request all locks" during recovery. */
struct dlm_request_all_locks_priv
{
	u8 reco_master;
	u8 dead_node;
};

/* Payload for deferred processing of a migrated lockres. */
struct dlm_mig_lockres_priv
{
	struct dlm_lock_resource *lockres;
	u8 real_master;
	u8 extra_ref;
};

/* Payload for a deferred assert_master broadcast. */
struct dlm_assert_master_priv
{
	struct dlm_lock_resource *lockres;
	u8 request_from;
	u32 flags;
	unsigned ignore_higher:1;
};

/* Payload for a deferred lockres dereference. */
struct dlm_deref_lockres_priv
{
	struct dlm_lock_resource *deref_res;
	u8 deref_node;
};

/* One unit of deferred work queued on dlm->work_list; initialize with
 * dlm_init_work_item() below. */
struct dlm_work_item
{
	struct list_head list;		/* link in dlm->work_list */
	dlm_workfunc_t *func;		/* what to run */
	struct dlm_ctxt *dlm;		/* domain (holds a dlm_grab ref) */
	void *data;			/* opaque arg passed to func */
	union {				/* func-specific payload */
		struct dlm_request_all_locks_priv ral;
		struct dlm_mig_lockres_priv ml;
		struct dlm_assert_master_priv am;
		struct dlm_deref_lockres_priv dl;
	} u;
};
|  | 253 |  | 
|  | 254 | static inline void dlm_init_work_item(struct dlm_ctxt *dlm, | 
|  | 255 | struct dlm_work_item *i, | 
|  | 256 | dlm_workfunc_t *f, void *data) | 
|  | 257 | { | 
|  | 258 | memset(i, 0, sizeof(*i)); | 
|  | 259 | i->func = f; | 
|  | 260 | INIT_LIST_HEAD(&i->list); | 
|  | 261 | i->data = data; | 
|  | 262 | i->dlm = dlm;  /* must have already done a dlm_grab on this! */ | 
|  | 263 | } | 
|  | 264 |  | 
|  | 265 |  | 
|  | 266 |  | 
/* Record the node currently joining the domain and wake any waiters.
 * Caller must hold dlm->spinlock. */
static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm,
					  u8 node)
{
	assert_spin_locked(&dlm->spinlock);

	dlm->joining_node = node;
	wake_up(&dlm->dlm_join_events);
}

/* dlm_lock_resource.state flag bits */
#define DLM_LOCK_RES_UNINITED             0x00000001
#define DLM_LOCK_RES_RECOVERING           0x00000002
#define DLM_LOCK_RES_READY                0x00000004
#define DLM_LOCK_RES_DIRTY                0x00000008
#define DLM_LOCK_RES_IN_PROGRESS          0x00000010
#define DLM_LOCK_RES_MIGRATING            0x00000020
#define DLM_LOCK_RES_DROPPING_REF         0x00000040
#define DLM_LOCK_RES_BLOCK_DIRTY          0x00001000
#define DLM_LOCK_RES_SETREF_INPROG        0x00002000

/* max milliseconds to wait to sync up a network failure with a node death */
#define DLM_NODE_DEATH_WAIT_MAX (5 * 1000)

/* Minimum idle time before an unused lockres is purged. */
#define DLM_PURGE_INTERVAL_MS   (8 * 1000)
|  | 290 |  | 
/* One named lock resource: the unit of mastery, migration and
 * recovery.  Lives in dlm->lockres_hash. */
struct dlm_lock_resource
{
	/* WARNING: Please see the comment in dlm_init_lockres before
	 * adding fields here. */
	struct hlist_node hash_node;	/* link in dlm->lockres_hash */
	struct qstr lockname;		/* name + precomputed hash */
	struct kref      refs;

	/*
	 * Please keep granted, converting, and blocked in this order,
	 * as some funcs want to iterate over all lists.
	 *
	 * All four lists are protected by the hash's reference.
	 */
	struct list_head granted;
	struct list_head converting;
	struct list_head blocked;
	struct list_head purge;

	/*
	 * These two lists require you to hold an additional reference
	 * while they are on the list.
	 */
	struct list_head dirty;
	struct list_head recovering; // dlm_recovery_ctxt.resources list

	/* Added during init and removed during release */
	struct list_head tracking;	/* dlm->tracking_list */

	/* unused lock resources have their last_used stamped and are
	 * put on a list for the dlm thread to run. */
	unsigned long    last_used;

	struct dlm_ctxt *dlm;		/* owning domain */

	unsigned migration_pending:1;
	atomic_t asts_reserved;
	spinlock_t spinlock;
	wait_queue_head_t wq;
	u8  owner;              //node which owns the lock resource, or unknown
	u16 state;		/* DLM_LOCK_RES_* flag bits */
	char lvb[DLM_LVB_LEN];	/* lock value block */
	unsigned int inflight_locks;
	/* bitmap of nodes holding a reference on this lockres */
	unsigned long refmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
};

/* Wire/in-memory representation of one lock; sized to pack many into a
 * single migration message (see the sizing comment further down). */
struct dlm_migratable_lock
{
	__be64 cookie;		/* unique lock id */

	/* these 3 are just padding for the in-memory structure, but
	 * list and flags are actually used when sent over the wire */
	__be16 pad1;
	u8 list;  // 0=granted, 1=converting, 2=blocked
	u8 flags;

	s8 type;		/* lock level */
	s8 convert_type;	/* requested level while converting */
	s8 highest_blocked;
	u8 node;		/* node holding the lock */
};  // 16 bytes

/* One lock held (or requested) on a lockres. */
struct dlm_lock
{
	struct dlm_migratable_lock ml;

	struct list_head list;		/* link in one of the lockres lists */
	struct list_head ast_list;	/* link in dlm->pending_asts */
	struct list_head bast_list;	/* link in dlm->pending_basts */
	struct dlm_lock_resource *lockres;
	spinlock_t spinlock;
	struct kref lock_refs;

	// ast and bast must be callable while holding a spinlock!
	dlm_astlockfunc_t *ast;
	dlm_bastlockfunc_t *bast;
	void *astdata;
	struct dlm_lockstatus *lksb;
	/* in-flight operation flags */
	unsigned ast_pending:1,
		 bast_pending:1,
		 convert_pending:1,
		 lock_pending:1,
		 cancel_pending:1,
		 unlock_pending:1,
		 lksb_kernel_allocated:1;
};


/* dlm_lockstatus flag bits (only the LVB transfer bits are in use) */
#define DLM_LKSB_UNUSED1           0x01
#define DLM_LKSB_PUT_LVB           0x02
#define DLM_LKSB_GET_LVB           0x04
#define DLM_LKSB_UNUSED2           0x08
#define DLM_LKSB_UNUSED3           0x10
#define DLM_LKSB_UNUSED4           0x20
#define DLM_LKSB_UNUSED5           0x40
#define DLM_LKSB_UNUSED6           0x80


/* Index of the three queues on a lockres; order matches the wire
 * encoding in dlm_migratable_lock.list. */
enum dlm_lockres_list {
	DLM_GRANTED_LIST = 0,
	DLM_CONVERTING_LIST,
	DLM_BLOCKED_LIST
};
|  | 394 |  | 
| Kurt Hackel | 8bc674c | 2006-04-27 18:02:10 -0700 | [diff] [blame] | 395 | static inline int dlm_lvb_is_empty(char *lvb) | 
|  | 396 | { | 
|  | 397 | int i; | 
|  | 398 | for (i=0; i<DLM_LVB_LEN; i++) | 
|  | 399 | if (lvb[i]) | 
|  | 400 | return 0; | 
|  | 401 | return 1; | 
|  | 402 | } | 
|  | 403 |  | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 404 | static inline struct list_head * | 
|  | 405 | dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx) | 
|  | 406 | { | 
|  | 407 | struct list_head *ret = NULL; | 
|  | 408 | if (idx == DLM_GRANTED_LIST) | 
|  | 409 | ret = &res->granted; | 
|  | 410 | else if (idx == DLM_CONVERTING_LIST) | 
|  | 411 | ret = &res->converting; | 
|  | 412 | else if (idx == DLM_BLOCKED_LIST) | 
|  | 413 | ret = &res->blocked; | 
|  | 414 | else | 
|  | 415 | BUG(); | 
|  | 416 | return ret; | 
|  | 417 | } | 
|  | 418 |  | 
|  | 419 |  | 
|  | 420 |  | 
|  | 421 |  | 
/* Cursor for iterating over a snapshot of a node bitmap. */
struct dlm_node_iter
{
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int curnode;	/* last node returned */
};


/* Network message types; numeric values are on the wire, so existing
 * entries must never be renumbered (note the retained UNUSED slot). */
enum {
	DLM_MASTER_REQUEST_MSG    = 500,
	DLM_UNUSED_MSG1,         /* 501 */
	DLM_ASSERT_MASTER_MSG,	 /* 502 */
	DLM_CREATE_LOCK_MSG,	 /* 503 */
	DLM_CONVERT_LOCK_MSG,	 /* 504 */
	DLM_PROXY_AST_MSG,	 /* 505 */
	DLM_UNLOCK_LOCK_MSG,	 /* 506 */
	DLM_DEREF_LOCKRES_MSG,	 /* 507 */
	DLM_MIGRATE_REQUEST_MSG, /* 508 */
	DLM_MIG_LOCKRES_MSG, 	 /* 509 */
	DLM_QUERY_JOIN_MSG,	 /* 510 */
	DLM_ASSERT_JOINED_MSG,	 /* 511 */
	DLM_CANCEL_JOIN_MSG,	 /* 512 */
	DLM_EXIT_DOMAIN_MSG,	 /* 513 */
	DLM_MASTER_REQUERY_MSG,	 /* 514 */
	DLM_LOCK_REQUEST_MSG,	 /* 515 */
	DLM_RECO_DATA_DONE_MSG,	 /* 516 */
	DLM_BEGIN_RECO_MSG,	 /* 517 */
	DLM_FINALIZE_RECO_MSG	 /* 518 */
};

/* Per-node progress record during recovery; lives on
 * dlm_recovery_ctxt.node_data. */
struct dlm_reco_node_data
{
	int state;		/* DLM_RECO_NODE_DATA_* below */
	u8 node_num;
	struct list_head list;
};

/* States for dlm_reco_node_data.state */
enum {
	DLM_RECO_NODE_DATA_DEAD = -1,
	DLM_RECO_NODE_DATA_INIT = 0,
	DLM_RECO_NODE_DATA_REQUESTING,
	DLM_RECO_NODE_DATA_REQUESTED,
	DLM_RECO_NODE_DATA_RECEIVING,
	DLM_RECO_NODE_DATA_DONE,
	DLM_RECO_NODE_DATA_FINALIZE_SENT,
};


/* Possible answers to a master request. */
enum {
	DLM_MASTER_RESP_NO = 0,
	DLM_MASTER_RESP_YES,
	DLM_MASTER_RESP_MAYBE,
	DLM_MASTER_RESP_ERROR
};


/* Wire format: DLM_MASTER_REQUEST_MSG payload. */
struct dlm_master_request
{
	u8 node_idx;		/* sender */
	u8 namelen;
	__be16 pad1;
	__be32 flags;

	u8 name[O2NM_MAX_NAME_LEN];
};

/* Response flag bits for assert_master exchanges */
#define DLM_ASSERT_RESPONSE_REASSERT       0x00000001
#define DLM_ASSERT_RESPONSE_MASTERY_REF    0x00000002

/* dlm_assert_master.flags bits */
#define DLM_ASSERT_MASTER_MLE_CLEANUP      0x00000001
#define DLM_ASSERT_MASTER_REQUERY          0x00000002
#define DLM_ASSERT_MASTER_FINISH_MIGRATION 0x00000004
/* Wire format: DLM_ASSERT_MASTER_MSG payload. */
struct dlm_assert_master
{
	u8 node_idx;		/* sender */
	u8 namelen;
	__be16 pad1;
	__be32 flags;		/* DLM_ASSERT_MASTER_* */

	u8 name[O2NM_MAX_NAME_LEN];
};
|  | 502 |  | 
/* Response flag bit for a migrate request */
#define DLM_MIGRATE_RESPONSE_MASTERY_REF   0x00000001

/* Wire format: DLM_MIGRATE_REQUEST_MSG payload. */
struct dlm_migrate_request
{
	u8 master;		/* current master */
	u8 new_master;		/* proposed target */
	u8 namelen;
	u8 pad1;
	__be32 pad2;
	u8 name[O2NM_MAX_NAME_LEN];
};

/* Wire format: DLM_MASTER_REQUERY_MSG payload. */
struct dlm_master_requery
{
	u8 pad1;
	u8 pad2;
	u8 node_idx;		/* sender */
	u8 namelen;
	__be32 pad3;
	u8 name[O2NM_MAX_NAME_LEN];
};

/* dlm_migratable_lockres.flags bits */
#define DLM_MRES_RECOVERY   0x01
#define DLM_MRES_MIGRATION  0x02
#define DLM_MRES_ALL_DONE   0x04

/*
 * We would like to get one whole lockres into a single network
 * message whenever possible.  Generally speaking, there will be
 * at most one dlm_lock on a lockres for each node in the cluster,
 * plus (infrequently) any additional locks coming in from userdlm.
 *
 * struct _dlm_lockres_page
 * {
 * 	dlm_migratable_lockres mres;
 * 	dlm_migratable_lock ml[DLM_MAX_MIGRATABLE_LOCKS];
 * 	u8 pad[DLM_MIG_LOCKRES_RESERVED];
 * };
 *
 * from ../cluster/tcp.h
 *    NET_MAX_PAYLOAD_BYTES  (4096 - sizeof(net_msg))
 *    (roughly 4080 bytes)
 * and sizeof(dlm_migratable_lockres) = 112 bytes
 * and sizeof(dlm_migratable_lock) = 16 bytes
 *
 * Choosing DLM_MAX_MIGRATABLE_LOCKS=240 and
 * DLM_MIG_LOCKRES_RESERVED=128 means we have this:
 *
 *  (DLM_MAX_MIGRATABLE_LOCKS * sizeof(dlm_migratable_lock)) +
 *     sizeof(dlm_migratable_lockres) + DLM_MIG_LOCKRES_RESERVED =
 *        NET_MAX_PAYLOAD_BYTES
 *  (240 * 16) + 112 + 128 = 4080
 *
 * So a lockres would need more than 240 locks before it would
 * use more than one network packet to recover.  Not too bad.
 */
#define DLM_MAX_MIGRATABLE_LOCKS   240
|  | 560 |  | 
|  | 561 | struct dlm_migratable_lockres | 
|  | 562 | { | 
|  | 563 | u8 master; | 
|  | 564 | u8 lockname_len; | 
|  | 565 | u8 num_locks;    // locks sent in this structure | 
|  | 566 | u8 flags; | 
|  | 567 | __be32 total_locks; // locks to be sent for this migration cookie | 
|  | 568 | __be64 mig_cookie;  // cookie for this lockres migration | 
|  | 569 | // or zero if not needed | 
|  | 570 | // 16 bytes | 
|  | 571 | u8 lockname[DLM_LOCKID_NAME_MAX]; | 
|  | 572 | // 48 bytes | 
|  | 573 | u8 lvb[DLM_LVB_LEN]; | 
|  | 574 | // 112 bytes | 
|  | 575 | struct dlm_migratable_lock ml[0];  // 16 bytes each, begins at byte 112 | 
|  | 576 | }; | 
|  | 577 | #define DLM_MIG_LOCKRES_MAX_LEN  \ | 
|  | 578 | (sizeof(struct dlm_migratable_lockres) + \ | 
|  | 579 | (sizeof(struct dlm_migratable_lock) * \ | 
|  | 580 | DLM_MAX_MIGRATABLE_LOCKS) ) | 
|  | 581 |  | 
|  | 582 | /* from above, 128 bytes | 
|  | 583 | * for some undetermined future use */ | 
|  | 584 | #define DLM_MIG_LOCKRES_RESERVED   (NET_MAX_PAYLOAD_BYTES - \ | 
|  | 585 | DLM_MIG_LOCKRES_MAX_LEN) | 
|  | 586 |  | 
/* Wire format: DLM_CREATE_LOCK_MSG payload. */
struct dlm_create_lock
{
	__be64 cookie;		/* unique lock id */

	__be32 flags;
	u8 pad1;
	u8 node_idx;		/* sender */
	s8 requested_type;	/* requested lock level */
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];
};
|  | 599 |  | 
|  | 600 | struct dlm_convert_lock | 
|  | 601 | { | 
|  | 602 | __be64 cookie; | 
|  | 603 |  | 
|  | 604 | __be32 flags; | 
|  | 605 | u8 pad1; | 
|  | 606 | u8 node_idx; | 
|  | 607 | s8 requested_type; | 
|  | 608 | u8 namelen; | 
|  | 609 |  | 
|  | 610 | u8 name[O2NM_MAX_NAME_LEN]; | 
|  | 611 |  | 
|  | 612 | s8 lvb[0]; | 
|  | 613 | }; | 
|  | 614 | #define DLM_CONVERT_LOCK_MAX_LEN  (sizeof(struct dlm_convert_lock)+DLM_LVB_LEN) | 
|  | 615 |  | 
|  | 616 | struct dlm_unlock_lock | 
|  | 617 | { | 
|  | 618 | __be64 cookie; | 
|  | 619 |  | 
|  | 620 | __be32 flags; | 
|  | 621 | __be16 pad1; | 
|  | 622 | u8 node_idx; | 
|  | 623 | u8 namelen; | 
|  | 624 |  | 
|  | 625 | u8 name[O2NM_MAX_NAME_LEN]; | 
|  | 626 |  | 
|  | 627 | s8 lvb[0]; | 
|  | 628 | }; | 
|  | 629 | #define DLM_UNLOCK_LOCK_MAX_LEN  (sizeof(struct dlm_unlock_lock)+DLM_LVB_LEN) | 
|  | 630 |  | 
|  | 631 | struct dlm_proxy_ast | 
|  | 632 | { | 
|  | 633 | __be64 cookie; | 
|  | 634 |  | 
|  | 635 | __be32 flags; | 
|  | 636 | u8 node_idx; | 
|  | 637 | u8 type; | 
|  | 638 | u8 blocked_type; | 
|  | 639 | u8 namelen; | 
|  | 640 |  | 
|  | 641 | u8 name[O2NM_MAX_NAME_LEN]; | 
|  | 642 |  | 
|  | 643 | s8 lvb[0]; | 
|  | 644 | }; | 
|  | 645 | #define DLM_PROXY_AST_MAX_LEN  (sizeof(struct dlm_proxy_ast)+DLM_LVB_LEN) | 
|  | 646 |  | 
#define DLM_MOD_KEY (0x666c6172)
/* Outcome of a domain join query. */
enum dlm_query_join_response_code {
	JOIN_DISALLOW = 0,
	JOIN_OK,
	JOIN_OK_NO_MAP,
	JOIN_PROTOCOL_MISMATCH,
};

/* Packed join response; overlaid on a u32 via the union below so it
 * can travel as a plain status word. */
struct dlm_query_join_packet {
	u8 code;	/* Response code.  dlm_minor and fs_minor
			   are only valid if this is JOIN_OK */
	u8 dlm_minor;	/* The minor version of the protocol the
			   dlm is speaking. */
	u8 fs_minor;	/* The minor version of the protocol the
			   filesystem is speaking. */
	u8 reserved;
};

/* Two views of the same 32 bits: raw integer and structured packet. */
union dlm_query_join_response {
	u32 intval;
	struct dlm_query_join_packet packet;
};

/* Wire format: DLM_LOCK_REQUEST_MSG payload. */
struct dlm_lock_request
{
	u8 node_idx;		/* sender */
	u8 dead_node;		/* node being recovered */
	__be16 pad1;
	__be32 pad2;
};

/* Wire format: DLM_RECO_DATA_DONE_MSG payload. */
struct dlm_reco_data_done
{
	u8 node_idx;		/* sender */
	u8 dead_node;		/* node being recovered */
	__be16 pad1;
	__be32 pad2;

	/* unused for now */
	/* eventually we can use this to attempt
	 * lvb recovery based on each node's info */
	u8 reco_lvb[DLM_LVB_LEN];
};

/* Wire format: DLM_BEGIN_RECO_MSG payload. */
struct dlm_begin_reco
{
	u8 node_idx;		/* sender */
	u8 dead_node;		/* node being recovered */
	__be16 pad1;
	__be32 pad2;
};
|  | 698 |  | 
|  | 699 |  | 
| Srinivas Eeda | 1faf289 | 2007-01-29 15:31:35 -0800 | [diff] [blame] | 700 | #define BITS_PER_BYTE 8 | 
|  | 701 | #define BITS_TO_BYTES(bits) (((bits)+BITS_PER_BYTE-1)/BITS_PER_BYTE) | 
|  | 702 |  | 
/*
 * Join request sent when a node wants to enter a dlm domain.  Carries
 * the sender's node number, the domain name, the dlm and filesystem
 * protocol versions it speaks, and a bitmap of the nodes it can see.
 */
struct dlm_query_join_request
{
	u8 node_idx;				/* sender's node number */
	u8 pad1[2];
	u8 name_len;				/* valid bytes in domain[] */
	struct dlm_protocol_version dlm_proto;	/* dlm protocol spoken */
	struct dlm_protocol_version fs_proto;	/* fs protocol spoken */
	u8 domain[O2NM_MAX_NAME_LEN];		/* domain name */
	u8 node_map[BITS_TO_BYTES(O2NM_MAX_NODES)]; /* nodes sender sees */
};
|  | 713 |  | 
/*
 * Message asserting that @node_idx has joined the named domain.
 */
struct dlm_assert_joined
{
	u8 node_idx;			/* node that joined */
	u8 pad1[2];
	u8 name_len;			/* valid bytes in domain[] */
	u8 domain[O2NM_MAX_NAME_LEN];	/* domain name */
};
|  | 721 |  | 
/*
 * Message cancelling an in-progress join of the named domain.
 * Same layout as dlm_assert_joined.
 */
struct dlm_cancel_join
{
	u8 node_idx;			/* node cancelling its join */
	u8 pad1[2];
	u8 name_len;			/* valid bytes in domain[] */
	u8 domain[O2NM_MAX_NAME_LEN];	/* domain name */
};
|  | 729 |  | 
/*
 * Message announcing that @node_idx is leaving the domain.
 */
struct dlm_exit_domain
{
	u8 node_idx;	/* departing node */
	u8 pad1[3];
};
|  | 735 |  | 
/*
 * Message finalizing recovery of @dead_node.  @flags was added later
 * (see the blame), presumably to stage finalization -- confirm against
 * dlm_finalize_reco_handler.
 */
struct dlm_finalize_reco
{
	u8 node_idx;	/* sending node */
	u8 dead_node;	/* node whose recovery is being finalized */
	u8 flags;	/* finalization flags */
	u8 pad1;
	__be32 pad2;
};
|  | 744 |  | 
/*
 * Message telling a lockres master that the sender no longer holds a
 * reference on the named lock resource (see the refmap helpers below).
 */
struct dlm_deref_lockres
{
	u32 pad1;
	u16 pad2;
	u8 node_idx;	/* node dropping its reference */
	u8 namelen;	/* valid bytes in name[] */

	u8 name[O2NM_MAX_NAME_LEN];	/* lockres name */
};
|  | 754 |  | 
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 755 | static inline enum dlm_status | 
|  | 756 | __dlm_lockres_state_to_status(struct dlm_lock_resource *res) | 
|  | 757 | { | 
|  | 758 | enum dlm_status status = DLM_NORMAL; | 
|  | 759 |  | 
|  | 760 | assert_spin_locked(&res->spinlock); | 
|  | 761 |  | 
|  | 762 | if (res->state & DLM_LOCK_RES_RECOVERING) | 
|  | 763 | status = DLM_RECOVERING; | 
|  | 764 | else if (res->state & DLM_LOCK_RES_MIGRATING) | 
|  | 765 | status = DLM_MIGRATING; | 
|  | 766 | else if (res->state & DLM_LOCK_RES_IN_PROGRESS) | 
|  | 767 | status = DLM_FORWARD; | 
|  | 768 |  | 
|  | 769 | return status; | 
|  | 770 | } | 
|  | 771 |  | 
| Kurt Hackel | 2900485 | 2006-03-02 16:43:36 -0800 | [diff] [blame] | 772 | static inline u8 dlm_get_lock_cookie_node(u64 cookie) | 
|  | 773 | { | 
|  | 774 | u8 ret; | 
|  | 775 | cookie >>= 56; | 
|  | 776 | ret = (u8)(cookie & 0xffULL); | 
|  | 777 | return ret; | 
|  | 778 | } | 
|  | 779 |  | 
|  | 780 | static inline unsigned long long dlm_get_lock_cookie_seq(u64 cookie) | 
|  | 781 | { | 
|  | 782 | unsigned long long ret; | 
|  | 783 | ret = ((unsigned long long)cookie) & 0x00ffffffffffffffULL; | 
|  | 784 | return ret; | 
|  | 785 | } | 
|  | 786 |  | 
/* dlm_lock lifetime: allocation and reference counting. */
struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
			       struct dlm_lockstatus *lksb);
void dlm_lock_get(struct dlm_lock *lock);
void dlm_lock_put(struct dlm_lock *lock);

void dlm_lock_attach_lockres(struct dlm_lock *lock,
			     struct dlm_lock_resource *res);

/* o2net message handlers for incoming lock/convert/proxy-ast traffic. */
int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data);
int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			     void **ret_data);
int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
			  void **ret_data);

/* Undo a lock/convert operation that was left partially applied. */
void dlm_revert_pending_convert(struct dlm_lock_resource *res,
				struct dlm_lock *lock);
void dlm_revert_pending_lock(struct dlm_lock_resource *res,
			     struct dlm_lock *lock);

int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data);
/* Complete a cancel/unlock that was left pending. */
void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
			       struct dlm_lock *lock);
void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
			       struct dlm_lock *lock);

/* Worker and recovery thread lifecycle, and node liveness waits. */
int dlm_launch_thread(struct dlm_ctxt *dlm);
void dlm_complete_thread(struct dlm_ctxt *dlm);
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);

/* Domain reference counting and join state. */
void dlm_put(struct dlm_ctxt *dlm);
struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
int dlm_domain_fully_joined(struct dlm_ctxt *dlm);

/* Re-evaluate whether a lock resource is still in use. */
void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res);
void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			    struct dlm_lock_resource *res);
/* Take a reference on a lock resource; pairs with dlm_lockres_put(). */
static inline void dlm_lockres_get(struct dlm_lock_resource *res)
{
	/* This is called on every lookup, so it might be worth
	 * inlining. */
	kref_get(&res->refs);
}
void dlm_lockres_put(struct dlm_lock_resource *res);
void __dlm_unhash_lockres(struct dlm_lock_resource *res);
void __dlm_insert_lockres(struct dlm_ctxt *dlm,
			  struct dlm_lock_resource *res);
/* Hash-table lookups; the __ variants assume the caller already holds
 * the appropriate lock and take a precomputed hash. */
struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
						     const char *name,
						     unsigned int len,
						     unsigned int hash);
struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
						const char *name,
						unsigned int len,
						unsigned int hash);
struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
					      const char *name,
					      unsigned int len);

/* Nonzero if errno indicates the remote host went down. */
int dlm_is_host_down(int errno);

/* Find-or-create entry point for lock resources. */
struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
						 const char *lockid,
						 int namelen,
						 int flags);
struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
					  const char *name,
					  unsigned int namelen);
|  | 863 |  | 
/*
 * Wrappers that stamp caller file/line into the refmap bit helpers for
 * the (currently disabled) debug output.  Arguments are parenthesized
 * so expression arguments expand safely.
 */
#define dlm_lockres_set_refmap_bit(bit, res)  \
	__dlm_lockres_set_refmap_bit((bit), (res), __FILE__, __LINE__)
#define dlm_lockres_clear_refmap_bit(bit, res)  \
	__dlm_lockres_clear_refmap_bit((bit), (res), __FILE__, __LINE__)
|  | 868 |  | 
/*
 * Set @bit in the lockres refmap -- presumably marking that the node
 * with that number holds a reference (see struct dlm_deref_lockres);
 * confirm against the deref handler.  @file/@line come from the wrapper
 * macro and feed the debug printk below, currently compiled out.
 */
static inline void __dlm_lockres_set_refmap_bit(int bit,
						struct dlm_lock_resource *res,
						const char *file,
						int line)
{
	//printk("%s:%d:%.*s: setting bit %d\n", file, line,
	//     res->lockname.len, res->lockname.name, bit);
	set_bit(bit, res->refmap);
}
|  | 878 |  | 
/*
 * Clear @bit in the lockres refmap (counterpart of
 * __dlm_lockres_set_refmap_bit).  @file/@line feed the debug printk
 * below, currently compiled out.
 */
static inline void __dlm_lockres_clear_refmap_bit(int bit,
						  struct dlm_lock_resource *res,
						  const char *file,
						  int line)
{
	//printk("%s:%d:%.*s: clearing bit %d\n", file, line,
	//     res->lockname.len, res->lockname.name, bit);
	clear_bit(bit, res->refmap);
}
|  | 888 |  | 
/* Inflight-reference tracking on lock resources.  The __ versions take
 * the caller's file/line for debug output; use the macros below.
 * dlm_lockres_grab_inflight_ref_new is the variant for a freshly
 * created lockres (new_lockres = 1). */
void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     const char *file,
				     int line);
void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     int new_lockres,
				     const char *file,
				     int line);
#define dlm_lockres_drop_inflight_ref(d,r)  \
	__dlm_lockres_drop_inflight_ref(d,r,__FILE__,__LINE__)
#define dlm_lockres_grab_inflight_ref(d,r)  \
	__dlm_lockres_grab_inflight_ref(d,r,0,__FILE__,__LINE__)
#define dlm_lockres_grab_inflight_ref_new(d,r)  \
	__dlm_lockres_grab_inflight_ref(d,r,1,__FILE__,__LINE__)
|  | 904 |  | 
/* AST/BAST queueing and delivery.  The __ variants assume the caller
 * holds the appropriate dlm locking. */
void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
/* Deliver an AST/BAST locally ... */
void dlm_do_local_ast(struct dlm_ctxt *dlm,
		      struct dlm_lock_resource *res,
		      struct dlm_lock *lock);
/* ... or ship it to the lock's owning node. */
int dlm_do_remote_ast(struct dlm_ctxt *dlm,
		      struct dlm_lock_resource *res,
		      struct dlm_lock *lock);
void dlm_do_local_bast(struct dlm_ctxt *dlm,
		       struct dlm_lock_resource *res,
		       struct dlm_lock *lock,
		       int blocked_type);
/* Low-level proxy message send; see the dlm_send_proxy_{ast,bast}
 * wrappers below for the usual entry points. */
int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm,
			   struct dlm_lock_resource *res,
			   struct dlm_lock *lock,
			   int msg_type,
			   int blocked_type, int flags);
/* Send a BAST to the node holding @lock: a dlm_send_proxy_ast_msg()
 * wrapper fixing msg_type to DLM_BAST (flags unused for basts). */
static inline int dlm_send_proxy_bast(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      struct dlm_lock *lock,
				      int blocked_type)
{
	return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_BAST,
				      blocked_type, 0);
}
|  | 932 |  | 
/* Send an AST to the node holding @lock: a dlm_send_proxy_ast_msg()
 * wrapper fixing msg_type to DLM_AST (blocked_type unused for asts). */
static inline int dlm_send_proxy_ast(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_lock *lock,
				     int flags)
{
	return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_AST,
				      0, flags);
}
|  | 941 |  | 
/* Debug printing of a lock resource; __ variant assumes locking held. */
void dlm_print_one_lock_resource(struct dlm_lock_resource *res);
void __dlm_print_one_lock_resource(struct dlm_lock_resource *res);

u8 dlm_nm_this_node(struct dlm_ctxt *dlm);
void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);


/* Node-manager / heartbeat glue. */
int dlm_nm_init(struct dlm_ctxt *dlm);
int dlm_heartbeat_init(struct dlm_ctxt *dlm);
void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data);
void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data);

/* Lockres migration and mastery. */
int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
int dlm_finish_migration(struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res,
			 u8 old_master);
void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res);
void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res);

/* o2net handlers for mastery, migration and recovery messages. */
int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data);
int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data);
void dlm_assert_master_post_handler(int status, void *data, void *ret_data);
int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data);
int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
				void **ret_data);
int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data);
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data);
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
				  void **ret_data);
int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data);
int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			   void **ret_data);
int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data);
/* Ask @nodenum who masters @res; result returned in *real_master. */
int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			  u8 nodenum, u8 *real_master);


int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res,
			       int ignore_higher,
			       u8 request_from,
			       u32 flags);


/* Ship one lockres's state to @send_to during migration/recovery. */
int dlm_send_one_lockres(struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res,
			 struct dlm_migratable_lockres *mres,
			 u8 send_to,
			 u8 flags);
void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res);

/* will exit holding res->spinlock, but may drop in function */
void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags);
void __dlm_wait_on_lockres_flags_set(struct dlm_lock_resource *res, int flags);
|  | 1006 |  | 
/* will exit holding res->spinlock, but may drop in function */
/* Wait until the resource is neither busy, recovering nor migrating. */
static inline void __dlm_wait_on_lockres(struct dlm_lock_resource *res)
{
	__dlm_wait_on_lockres_flags(res, (DLM_LOCK_RES_IN_PROGRESS|
					  DLM_LOCK_RES_RECOVERING|
					  DLM_LOCK_RES_MIGRATING));
}
|  | 1014 |  | 
/* Master list entry (mle) hash maintenance; callers hold dlm locking. */
void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle);
void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle);

/* create/destroy slab caches */
int dlm_init_master_caches(void);
void dlm_destroy_master_caches(void);

int dlm_init_lock_cache(void);
void dlm_destroy_lock_cache(void);

int dlm_init_mle_cache(void);
void dlm_destroy_mle_cache(void);

void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up);
/* Tell the master we are dropping our reference on @res. */
int dlm_drop_lockres_ref(struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res);
void dlm_clean_master_list(struct dlm_ctxt *dlm,
			   u8 dead_node);
void dlm_force_free_mles(struct dlm_ctxt *dlm);
int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
/* Lockres state predicates; callers hold res->spinlock. */
int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
int __dlm_lockres_unused(struct dlm_lock_resource *res);
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1037 |  | 
|  | 1038 | static inline const char * dlm_lock_mode_name(int mode) | 
|  | 1039 | { | 
|  | 1040 | switch (mode) { | 
|  | 1041 | case LKM_EXMODE: | 
|  | 1042 | return "EX"; | 
|  | 1043 | case LKM_PRMODE: | 
|  | 1044 | return "PR"; | 
|  | 1045 | case LKM_NLMODE: | 
|  | 1046 | return "NL"; | 
|  | 1047 | } | 
|  | 1048 | return "UNKNOWN"; | 
|  | 1049 | } | 
|  | 1050 |  | 
|  | 1051 |  | 
|  | 1052 | static inline int dlm_lock_compatible(int existing, int request) | 
|  | 1053 | { | 
|  | 1054 | /* NO_LOCK compatible with all */ | 
|  | 1055 | if (request == LKM_NLMODE || | 
|  | 1056 | existing == LKM_NLMODE) | 
|  | 1057 | return 1; | 
|  | 1058 |  | 
|  | 1059 | /* EX incompatible with all non-NO_LOCK */ | 
|  | 1060 | if (request == LKM_EXMODE) | 
|  | 1061 | return 0; | 
|  | 1062 |  | 
|  | 1063 | /* request must be PR, which is compatible with PR */ | 
|  | 1064 | if (existing == LKM_PRMODE) | 
|  | 1065 | return 1; | 
|  | 1066 |  | 
|  | 1067 | return 0; | 
|  | 1068 | } | 
|  | 1069 |  | 
|  | 1070 | static inline int dlm_lock_on_list(struct list_head *head, | 
|  | 1071 | struct dlm_lock *lock) | 
|  | 1072 | { | 
|  | 1073 | struct list_head *iter; | 
|  | 1074 | struct dlm_lock *tmplock; | 
|  | 1075 |  | 
|  | 1076 | list_for_each(iter, head) { | 
|  | 1077 | tmplock = list_entry(iter, struct dlm_lock, list); | 
|  | 1078 | if (tmplock == lock) | 
|  | 1079 | return 1; | 
|  | 1080 | } | 
|  | 1081 | return 0; | 
|  | 1082 | } | 
|  | 1083 |  | 
|  | 1084 |  | 
|  | 1085 | static inline enum dlm_status dlm_err_to_dlm_status(int err) | 
|  | 1086 | { | 
|  | 1087 | enum dlm_status ret; | 
|  | 1088 | if (err == -ENOMEM) | 
|  | 1089 | ret = DLM_SYSERR; | 
|  | 1090 | else if (err == -ETIMEDOUT || o2net_link_down(err, NULL)) | 
|  | 1091 | ret = DLM_NOLOCKMGR; | 
|  | 1092 | else if (err == -EINVAL) | 
|  | 1093 | ret = DLM_BADPARAM; | 
|  | 1094 | else if (err == -ENAMETOOLONG) | 
|  | 1095 | ret = DLM_IVBUFLEN; | 
|  | 1096 | else | 
|  | 1097 | ret = DLM_BADARGS; | 
|  | 1098 | return ret; | 
|  | 1099 | } | 
|  | 1100 |  | 
|  | 1101 |  | 
/*
 * Start a node iteration over a private copy of @map, so the walk is
 * unaffected if the source bitmap changes afterwards.  curnode = -1
 * makes the first dlm_node_iter_next() start at bit 0.
 * NOTE(review): assumes @map is at least sizeof(iter->node_map) bytes.
 */
static inline void dlm_node_iter_init(unsigned long *map,
				      struct dlm_node_iter *iter)
{
	memcpy(iter->node_map, map, sizeof(iter->node_map));
	iter->curnode = -1;
}
|  | 1108 |  | 
|  | 1109 | static inline int dlm_node_iter_next(struct dlm_node_iter *iter) | 
|  | 1110 | { | 
|  | 1111 | int bit; | 
|  | 1112 | bit = find_next_bit(iter->node_map, O2NM_MAX_NODES, iter->curnode+1); | 
|  | 1113 | if (bit >= O2NM_MAX_NODES) { | 
|  | 1114 | iter->curnode = O2NM_MAX_NODES; | 
|  | 1115 | return -ENOENT; | 
|  | 1116 | } | 
|  | 1117 | iter->curnode = bit; | 
|  | 1118 | return bit; | 
|  | 1119 | } | 
|  | 1120 |  | 
/* Unconditionally record @owner as the master of @res.
 * Caller must hold res->spinlock. */
static inline void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
					 struct dlm_lock_resource *res,
					 u8 owner)
{
	assert_spin_locked(&res->spinlock);

	res->owner = owner;
}
|  | 1129 |  | 
/* Record @owner as the master of @res only if it actually changed.
 * Caller must hold res->spinlock. */
static inline void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
					    struct dlm_lock_resource *res,
					    u8 owner)
{
	assert_spin_locked(&res->spinlock);

	if (owner != res->owner)
		dlm_set_lockres_owner(dlm, res, owner);
}
| Kurt Hackel | 6714d8e | 2005-12-15 14:31:23 -0800 | [diff] [blame] | 1139 |  | 
|  | 1140 | #endif /* DLMCOMMON_H */ |