/*
 *  Device operations for the pnfs client.
 *
 *  Copyright (c) 2002
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *  Garth Goodson   <Garth.Goodson@netapp.com>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/export.h>
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

/*
 * Device ID RCU cache. A device ID is unique per server and layout type.
 */
#define NFS4_DEVICE_ID_HASH_BITS	5
#define NFS4_DEVICE_ID_HASH_SIZE	(1 << NFS4_DEVICE_ID_HASH_BITS)
#define NFS4_DEVICE_ID_HASH_MASK	(NFS4_DEVICE_ID_HASH_SIZE - 1)

static struct hlist_head nfs4_deviceid_cache[NFS4_DEVICE_ID_HASH_SIZE];
static DEFINE_SPINLOCK(nfs4_deviceid_lock);

void
nfs4_print_deviceid(const struct nfs4_deviceid *id)
{
	u32 *p = (u32 *)id;

	dprintk("%s: device id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
}
EXPORT_SYMBOL_GPL(nfs4_print_deviceid);

/*
 * Fold the NFS4_DEVICEID4_SIZE bytes of the device ID into one of the
 * NFS4_DEVICE_ID_HASH_SIZE buckets with a simple "x = x * 37 + byte"
 * rolling hash.
 */
static inline u32
nfs4_deviceid_hash(const struct nfs4_deviceid *id)
{
	unsigned char *cptr = (unsigned char *)id->data;
	unsigned int nbytes = NFS4_DEVICEID4_SIZE;
	u32 x = 0;

	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x & NFS4_DEVICE_ID_HASH_MASK;
}

static struct nfs4_deviceid_node *
_lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
		 const struct nfs_client *clp, const struct nfs4_deviceid *id,
		 long hash)
{
	struct nfs4_deviceid_node *d;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
		if (d->ld == ld && d->nfs_client == clp &&
		    !memcmp(&d->deviceid, id, sizeof(*id))) {
			if (atomic_read(&d->ref))
				return d;
			else
				continue;
		}
	return NULL;
}

/*
 * Lookup a deviceid in cache and get a reference count on it if found
 *
 * @ld   layout driver type the deviceid was registered with
 * @clp  nfs_client associated with deviceid
 * @id   deviceid to look up
 * @hash hash bucket index, as returned by nfs4_deviceid_hash()
 */
struct nfs4_deviceid_node *
_find_get_deviceid(const struct pnfs_layoutdriver_type *ld,
		   const struct nfs_client *clp, const struct nfs4_deviceid *id,
		   long hash)
{
	struct nfs4_deviceid_node *d;

	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, hash);
	if (d != NULL)
		atomic_inc(&d->ref);
	rcu_read_unlock();
	return d;
}

struct nfs4_deviceid_node *
nfs4_find_get_deviceid(const struct pnfs_layoutdriver_type *ld,
		       const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	return _find_get_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
}
EXPORT_SYMBOL_GPL(nfs4_find_get_deviceid);

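/*
 * Typical usage (illustrative sketch only, not built): a pNFS layout
 * driver normally consults this cache before going to the server.  The
 * xxx_alloc_deviceid_node() helper below is hypothetical and stands in
 * for the driver-specific code that issues GETDEVICEINFO, decodes the
 * device address, and initializes a node via nfs4_init_deviceid_node():
 *
 *	struct nfs4_deviceid_node *d;
 *
 *	d = nfs4_find_get_deviceid(ld, clp, id);
 *	if (d == NULL) {
 *		d = xxx_alloc_deviceid_node(ld, clp, id);
 *		if (d == NULL)
 *			return -ENODEV;
 *		d = nfs4_insert_deviceid_node(d);
 *	}
 *	... use the device ...
 *	nfs4_put_deviceid_node(d);
 *
 * Handling a lost insertion race is discussed after
 * nfs4_insert_deviceid_node() below.
 */
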
/*
 * Remove a deviceid from cache
 *
 * @ld  layout driver type the deviceid was registered with
 * @clp nfs_client associated with deviceid
 * @id  the deviceid to unhash
 *
 * Note that this only unhashes the node and drops the cache's reference;
 * the node itself is freed once the last reference is dropped.
 */
void
nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
		     const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	struct nfs4_deviceid_node *d;

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
	rcu_read_unlock();
	if (!d) {
		spin_unlock(&nfs4_deviceid_lock);
		return;
	}
	hlist_del_init_rcu(&d->node);
	spin_unlock(&nfs4_deviceid_lock);
	synchronize_rcu();

	/* balance the reference taken in nfs4_insert_deviceid_node */
	if (atomic_dec_and_test(&d->ref))
		d->ld->free_deviceid_node(d);
}
EXPORT_SYMBOL_GPL(nfs4_delete_deviceid);

void
nfs4_init_deviceid_node(struct nfs4_deviceid_node *d,
			const struct pnfs_layoutdriver_type *ld,
			const struct nfs_client *nfs_client,
			const struct nfs4_deviceid *id)
{
	INIT_HLIST_NODE(&d->node);
	INIT_HLIST_NODE(&d->tmpnode);
	d->ld = ld;
	d->nfs_client = nfs_client;
	d->flags = 0;
	d->deviceid = *id;
	atomic_set(&d->ref, 1);
}
EXPORT_SYMBOL_GPL(nfs4_init_deviceid_node);

/*
 * Insert a deviceid node into the cache unless an equivalent node is
 * already present
 *
 * @new the deviceid node to insert
 *      Note that the caller must have set up the following members,
 *      normally via nfs4_init_deviceid_node():
 *        new->ld
 *        new->nfs_client
 *        new->deviceid
 *
 * @ret @new if it was inserted; otherwise the already-cached entry, with
 *      a reference taken on the caller's behalf.
 */
struct nfs4_deviceid_node *
nfs4_insert_deviceid_node(struct nfs4_deviceid_node *new)
{
	struct nfs4_deviceid_node *d;
	long hash;

	spin_lock(&nfs4_deviceid_lock);
	hash = nfs4_deviceid_hash(&new->deviceid);
	d = _find_get_deviceid(new->ld, new->nfs_client, &new->deviceid, hash);
	if (d) {
		spin_unlock(&nfs4_deviceid_lock);
		return d;
	}

	hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
	spin_unlock(&nfs4_deviceid_lock);
	atomic_inc(&new->ref);

	return new;
}
EXPORT_SYMBOL_GPL(nfs4_insert_deviceid_node);

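/*
 * Note on duplicate inserts (illustrative sketch only, not built): if two
 * threads race to add the same device ID, only the first node inserted is
 * kept.  A caller that gets back a node other than the one it passed in
 * already holds a reference on the cached entry and should release its
 * own, now redundant, copy, for example:
 *
 *	d = nfs4_insert_deviceid_node(new);
 *	if (d != new)
 *		nfs4_put_deviceid_node(new);
 *	return d;
 */
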
/*
 * Dereference a deviceid node and delete it when its reference count drops
 * to zero.
 *
 * @d deviceid node to put
 *
 * return true iff the node was deleted
 * Note that the test for d->ref == 0 is sufficient to establish that the
 * node is no longer hashed in the global device id cache.
 */
bool
nfs4_put_deviceid_node(struct nfs4_deviceid_node *d)
{
	if (!atomic_dec_and_test(&d->ref))
		return false;
	d->ld->free_deviceid_node(d);
	return true;
}
EXPORT_SYMBOL_GPL(nfs4_put_deviceid_node);

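/*
 * Reference counting summary (as implemented above):
 * nfs4_init_deviceid_node() starts a node at ref == 1 (the creator's
 * reference), nfs4_insert_deviceid_node() takes an extra reference on
 * behalf of the cache, nfs4_find_get_deviceid() takes one per successful
 * lookup, and nfs4_delete_deviceid() / _deviceid_purge_client() drop the
 * cache's reference after unhashing.  The node is finally freed by
 * whichever path brings the count to zero.
 */
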
static void
_deviceid_purge_client(const struct nfs_client *clp, long hash)
{
	struct nfs4_deviceid_node *d;
	struct hlist_node *n;
	HLIST_HEAD(tmp);

	/* Phase 1: unhash this client's nodes and collect them on tmp */
	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
		if (d->nfs_client == clp && atomic_read(&d->ref)) {
			hlist_del_init_rcu(&d->node);
			hlist_add_head(&d->tmpnode, &tmp);
		}
	rcu_read_unlock();
	spin_unlock(&nfs4_deviceid_lock);

	if (hlist_empty(&tmp))
		return;

	/* Phase 2: wait out RCU readers, then drop the cache's references */
	synchronize_rcu();
	while (!hlist_empty(&tmp)) {
		d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode);
		hlist_del(&d->tmpnode);
		if (atomic_dec_and_test(&d->ref))
			d->ld->free_deviceid_node(d);
	}
}

void
nfs4_deviceid_purge_client(const struct nfs_client *clp)
{
	long h;

	if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS))
		return;
	for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
		_deviceid_purge_client(clp, h);
}

/*
 * Stop use of all deviceids associated with an nfs_client
 */
void
nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
{
	struct nfs4_deviceid_node *d;
	struct hlist_node *n;
	int i;

	rcu_read_lock();
	for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[i], node)
			if (d->nfs_client == clp)
				set_bit(NFS_DEVICEID_INVALID, &d->flags);
	}
	rcu_read_unlock();
}