/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 *
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include "netns.h"

#define RPCDBG_FACILITY RPCDBG_CACHE

static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);

static void cache_init(struct cache_head *h)
{
	time_t now = seconds_since_boot();
	h->next = NULL;
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	h->last_refresh = now;
}

static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h)
{
	return (h->expiry_time < seconds_since_boot()) ||
		(detail->flush_time > h->last_refresh);
}

struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
				       struct cache_head *key, int hash)
{
	struct cache_head **head, **hp;
	struct cache_head *new = NULL, *freeme = NULL;

	head = &detail->hash_table[hash];

	read_lock(&detail->hash_lock);

	for (hp = head; *hp != NULL; hp = &(*hp)->next) {
		struct cache_head *tmp = *hp;
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp))
				/* This entry is expired, we will discard it. */
				break;
			cache_get(tmp);
			read_unlock(&detail->hash_lock);
			return tmp;
		}
	}
	read_unlock(&detail->hash_lock);
	/* Didn't find anything, insert an empty entry */

	new = detail->alloc();
	if (!new)
		return NULL;
	/* must fully initialise 'new', else
	 * we might lose it if we need to
	 * cache_put it soon.
	 */
	cache_init(new);
	detail->init(new, key);

	write_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
	for (hp = head; *hp != NULL; hp = &(*hp)->next) {
		struct cache_head *tmp = *hp;
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp)) {
				*hp = tmp->next;
				tmp->next = NULL;
				detail->entries--;
				freeme = tmp;
				break;
			}
			cache_get(tmp);
			write_unlock(&detail->hash_lock);
			cache_put(new, detail);
			return tmp;
		}
	}
	new->next = *head;
	*head = new;
	detail->entries++;
	cache_get(new);
	write_unlock(&detail->hash_lock);

	if (freeme)
		cache_put(freeme, detail);
	return new;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
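
/*
 * Usage sketch (illustrative only; struct demo_ent, demo_lookup and
 * demo_hash are hypothetical names, not part of this file): a cache
 * implementation embeds struct cache_head at the start of its own entry
 * type and wraps sunrpc_cache_lookup() in a typed helper.  Because
 * ->match() compares only the key fields and ->init() copies them into
 * the freshly allocated entry, an on-stack template is enough as the
 * search key:
 *
 *	struct demo_ent {
 *		struct cache_head	h;
 *		int			key;
 *		int			value;
 *	};
 *
 *	static struct demo_ent *demo_lookup(struct cache_detail *cd, int key)
 *	{
 *		struct demo_ent tmpl = { .key = key };
 *		struct cache_head *ch;
 *
 *		ch = sunrpc_cache_lookup(cd, &tmpl.h, demo_hash(key));
 *		if (!ch)
 *			return NULL;
 *		return container_of(ch, struct demo_ent, h);
 *	}
 */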

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static void cache_fresh_locked(struct cache_head *head, time_t expiry)
{
	head->expiry_time = expiry;
	head->last_refresh = seconds_since_boot();
	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
	set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail)
{
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		cache_dequeue(detail, head);
	}
}

struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it.
	 */
	struct cache_head **head;
	struct cache_head *tmp;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		write_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			if (test_bit(CACHE_NEGATIVE, &new->flags))
				set_bit(CACHE_NEGATIVE, &old->flags);
			else
				detail->update(old, new);
			cache_fresh_locked(old, new->expiry_time);
			write_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail);
			return old;
		}
		write_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp);
	detail->init(tmp, old);
	head = &detail->hash_table[hash];

	write_lock(&detail->hash_lock);
	if (test_bit(CACHE_NEGATIVE, &new->flags))
		set_bit(CACHE_NEGATIVE, &tmp->flags);
	else
		detail->update(tmp, new);
	tmp->next = *head;
	*head = tmp;
	detail->entries++;
	cache_get(tmp);
	cache_fresh_locked(tmp, new->expiry_time);
	cache_fresh_locked(old, 0);
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail);
	cache_fresh_unlocked(old, detail);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);
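
/*
 * Continuing the sketch above (hypothetical names): a ->cache_parse()
 * implementation typically fills a second on-stack template with the
 * parsed content and expiry time, then lets sunrpc_cache_update()
 * install it.  Note that sunrpc_cache_update() always consumes the
 * reference passed in via 'old' (either handing it back or dropping
 * it), and the pointer it returns carries its own reference:
 *
 *	struct demo_ent tmpl = { .key = key, .value = value };
 *	struct demo_ent *ent;
 *	struct cache_head *ch;
 *
 *	tmpl.h.expiry_time = expiry;
 *	ent = demo_lookup(cd, key);
 *	if (!ent)
 *		return -ENOMEM;
 *	ch = sunrpc_cache_update(cd, &tmpl.h, &ent->h, demo_hash(key));
 *	if (!ch)
 *		return -ENOMEM;
 *	cache_put(ch, cd);
 */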

static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
{
	if (!cd->cache_upcall)
		return -EINVAL;
	return cd->cache_upcall(cd, h);
}

static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h)
{
	if (!test_bit(CACHE_VALID, &h->flags))
		return -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			return -ENOENT;
		else {
			/*
			 * In combination with write barrier in
			 * sunrpc_cache_update, ensures that anyone
			 * using the cache entry after this sees the
			 * updated contents:
			 */
			smp_rmb();
			return 0;
		}
	}
}

static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
{
	int rv;

	write_lock(&detail->hash_lock);
	rv = cache_is_valid(detail, h);
	if (rv != -EAGAIN) {
		write_unlock(&detail->hash_lock);
		return rv;
	}
	set_bit(CACHE_NEGATIVE, &h->flags);
	cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(h, detail);
	return -ENOENT;
}

/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 *	-EAGAIN if upcall is pending and request has been queued
 *	-ETIMEDOUT if upcall failed or request could not be queued, or
 *	           upcall completed but item is still invalid (implying that
 *	           the cache item has been replaced with a newer one).
 *	-ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
		struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	long refresh_age, age;

	/* First decide return status as best we can */
	rv = cache_is_valid(detail, h);

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = seconds_since_boot() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN || age > refresh_age/2) {
		dprintk("RPC:       Want update, refage=%ld, age=%ld\n",
			refresh_age, age);
		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
			switch (cache_make_upcall(detail, h)) {
			case -EINVAL:
				clear_bit(CACHE_PENDING, &h->flags);
				cache_revisit_request(h);
				rv = try_to_negate_entry(detail, h);
				break;
			case -EAGAIN:
				clear_bit(CACHE_PENDING, &h->flags);
				cache_revisit_request(h);
				break;
			}
		}
	}

	if (rv == -EAGAIN) {
		if (!cache_defer_req(rqstp, h)) {
			/*
			 * Request was not deferred; handle it as best
			 * we can ourselves:
			 */
			rv = cache_is_valid(detail, h);
			if (rv == -EAGAIN)
				rv = -ETIMEDOUT;
		}
	}
	if (rv)
		cache_put(h, detail);
	return rv;
}
EXPORT_SYMBOL_GPL(cache_check);
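
/*
 * Typical caller pattern (sketch, reusing the hypothetical helpers
 * above): look the entry up, then let cache_check() decide whether it
 * is usable, passing the request's cache_req handle so the request can
 * be deferred while an upcall is in flight:
 *
 *	ent = demo_lookup(cd, key);
 *	if (!ent)
 *		return -ENOMEM;
 *	err = cache_check(cd, &ent->h, &rqstp->rq_chandle);
 *	if (err == 0) {
 *		... use ent->value ...
 *		cache_put(&ent->h, cd);
 *	}
 *
 * On any non-zero return cache_check() has already dropped the
 * caller's reference, so the entry must not be touched again; -EAGAIN
 * means the request was deferred and will be revisited later.
 */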

/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time clean_cache is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 *
 */

static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;

void sunrpc_init_cache_detail(struct cache_detail *cd)
{
	rwlock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->readers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	schedule_delayed_work(&cache_cleaner, 0);
}
EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);

void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	write_lock(&cd->hash_lock);
	if (cd->entries || atomic_read(&cd->inuse)) {
		write_unlock(&cd->hash_lock);
		spin_unlock(&cache_list_lock);
		goto out;
	}
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	write_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (list_empty(&cache_list)) {
		/* module must be being unloaded so it's safe to kill the worker */
		cancel_delayed_work_sync(&cache_cleaner);
	}
	return;
out:
	printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name);
}
EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);

/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	       current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > seconds_since_boot())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = seconds_since_boot()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       current_detail->hash_table[current_index] == NULL)
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch, **cp;
		struct cache_detail *d;

		write_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		cp = &current_detail->hash_table[current_index];
		for (ch = *cp; ch; cp = &ch->next, ch = *cp) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (!cache_is_expired(current_detail, ch))
				continue;

			*cp = ch->next;
			ch->next = NULL;
			current_detail->entries--;
			rv = 1;
			break;
		}

		write_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch) {
			if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
				cache_dequeue(current_detail, ch);
			cache_revisit_request(ch);
			cache_put(ch, d);
		}
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
	int delay = 5;
	if (cache_clean() == -1)
		delay = round_jiffies_relative(30*HZ);

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		schedule_delayed_work(&cache_cleaner, delay);
}


/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned.
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);

void cache_purge(struct cache_detail *detail)
{
	detail->flush_time = LONG_MAX;
	detail->nextcheck = seconds_since_boot();
	cache_flush();
	detail->flush_time = 1;
}
EXPORT_SYMBOL_GPL(cache_purge);


/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available
 */
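
/*
 * Sketch of a deferral provider (hypothetical names; the svc layer's
 * svc_defer()/svc_revisit() follow this pattern): the ->defer() method
 * returns a record embedding a cache_deferred_req whose ->revisit()
 * either requeues the saved request or drops it:
 *
 *	struct demo_deferred {
 *		struct cache_deferred_req	handle;
 *		... saved request state ...
 *	};
 *
 *	static void demo_revisit(struct cache_deferred_req *dreq, int too_many)
 *	{
 *		struct demo_deferred *dd =
 *			container_of(dreq, struct demo_deferred, handle);
 *		if (too_many)
 *			demo_drop(dd);
 *		else
 *			demo_requeue(dd);
 *	}
 *
 * The cache core calls ->revisit(dreq, 0) once the item becomes usable
 * and ->revisit(dreq, 1) when the deferred request is being discarded.
 */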

#define	DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define	DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define	DFR_MAX	300	/* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
	hlist_del_init(&dreq->hash);
	if (!list_empty(&dreq->recent)) {
		list_del_init(&dreq->recent);
		cache_defer_cnt--;
	}
}

static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&dreq->recent);
	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}

static void setup_deferral(struct cache_deferred_req *dreq,
			   struct cache_head *item,
			   int count_me)
{
	dreq->item = item;

	spin_lock(&cache_defer_lock);

	__hash_deferred_req(dreq, item);

	if (count_me) {
		cache_defer_cnt++;
		list_add(&dreq->recent, &cache_defer_list);
	}

	spin_unlock(&cache_defer_lock);
}

struct thread_deferred_req {
	struct cache_deferred_req handle;
	struct completion completion;
};

static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
	struct thread_deferred_req *dr =
		container_of(dreq, struct thread_deferred_req, handle);
	complete(&dr->completion);
}

static void cache_wait_req(struct cache_req *req, struct cache_head *item)
{
	struct thread_deferred_req sleeper;
	struct cache_deferred_req *dreq = &sleeper.handle;

	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
	dreq->revisit = cache_restart_thread;

	setup_deferral(dreq, item, 0);

	if (!test_bit(CACHE_PENDING, &item->flags) ||
	    wait_for_completion_interruptible_timeout(
		    &sleeper.completion, req->thread_wait) <= 0) {
		/* The completion wasn't completed, so we need
		 * to clean up
		 */
		spin_lock(&cache_defer_lock);
		if (!hlist_unhashed(&sleeper.handle.hash)) {
			__unhash_deferred_req(&sleeper.handle);
			spin_unlock(&cache_defer_lock);
		} else {
			/* cache_revisit_request already removed
			 * this from the hash table, but hasn't
			 * called ->revisit yet.  It will very soon
			 * and we need to wait for it.
			 */
			spin_unlock(&cache_defer_lock);
			wait_for_completion(&sleeper.completion);
		}
	}
}

static void cache_limit_defers(void)
{
	/* Make sure we haven't exceeded the limit of allowed deferred
	 * requests.
	 */
	struct cache_deferred_req *discard = NULL;

	if (cache_defer_cnt <= DFR_MAX)
		return;

	spin_lock(&cache_defer_lock);

	/* Consider removing either the first or the last */
	if (cache_defer_cnt > DFR_MAX) {
		if (net_random() & 1)
			discard = list_entry(cache_defer_list.next,
					     struct cache_deferred_req, recent);
		else
			discard = list_entry(cache_defer_list.prev,
					     struct cache_deferred_req, recent);
		__unhash_deferred_req(discard);
	}
	spin_unlock(&cache_defer_lock);
	if (discard)
		discard->revisit(discard, 1);
}

/* Return true if and only if a deferred request is queued. */
static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;

	if (req->thread_wait) {
		cache_wait_req(req, item);
		if (!test_bit(CACHE_PENDING, &item->flags))
			return false;
	}
	dreq = req->defer(req);
	if (dreq == NULL)
		return false;
	setup_deferral(dreq, item, 1);
	if (!test_bit(CACHE_PENDING, &item->flags))
		/* Bit could have been cleared before we managed to
		 * set up the deferral, so need to revisit just in case
		 */
		cache_revisit_request(item);

	cache_limit_defers();
	return true;
}

static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;
	struct hlist_node *lp, *tmp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	hlist_for_each_entry_safe(dreq, lp, tmp, &cache_defer_hash[hash], hash)
		if (dreq->item == item) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}

	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}

void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/sunrpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 *
 */
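
/*
 * Example exchange (illustrative; the actual line format is defined by
 * each cache's ->cache_request and ->cache_parse methods, and the
 * strings below are made up): a user-space daemon blocks in read() on
 * the channel file, resolves the request it is handed, and writes back
 * one line containing the key, an expiry time and the content:
 *
 *	read(fd, buf, ...)    returns  "nfsd 10.0.0.1\n"
 *	write(fd, "nfsd 10.0.0.1 1400000000 alice\n", ...)
 *
 * See cache_read() and cache_write() below for the kernel side of
 * this exchange.
 */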

static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};

static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos, struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct inode *inode = filp->f_path.dentry->d_inode;
	int err;

	if (count == 0)
		return 0;

	mutex_lock(&inode->i_mutex); /* protect against multiple concurrent
				      * readers on this file */
again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		mutex_unlock(&inode->i_mutex);
		BUG_ON(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	BUG_ON(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	mutex_unlock(&inode->i_mutex);
	return err ? err : count;
}

static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	if (count == 0)
		return -EINVAL;
	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;
	kaddr[count] = '\0';
	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}

static ssize_t cache_slow_downcall(const char __user *buf,
				   size_t count, struct cache_detail *cd)
{
	static char write_buf[8192]; /* protected by queue_io_mutex */
	ssize_t ret = -EINVAL;

	if (count >= sizeof(write_buf))
		goto out;
	mutex_lock(&queue_io_mutex);
	ret = cache_do_downcall(write_buf, buf, count, cd);
	mutex_unlock(&queue_io_mutex);
out:
	return ret;
}

static ssize_t cache_downcall(struct address_space *mapping,
			      const char __user *buf,
			      size_t count, struct cache_detail *cd)
{
	struct page *page;
	char *kaddr;
	ssize_t ret = -ENOMEM;

	if (count >= PAGE_CACHE_SIZE)
		goto out_slow;

	page = find_or_create_page(mapping, 0, GFP_KERNEL);
	if (!page)
		goto out_slow;

	kaddr = kmap(page);
	ret = cache_do_downcall(kaddr, buf, count, cd);
	kunmap(page);
	unlock_page(page);
	page_cache_release(page);
	return ret;
out_slow:
	return cache_slow_downcall(buf, count, cd);
}

static ssize_t cache_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = filp->f_path.dentry->d_inode;
	ssize_t ret = -EINVAL;

	if (!cd->cache_parse)
		goto out;

	mutex_lock(&inode->i_mutex);
	ret = cache_downcall(mapping, buf, count, cd);
	mutex_unlock(&inode->i_mutex);
out:
	return ret;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static unsigned int cache_poll(struct file *filp, poll_table *wait,
			       struct cache_detail *cd)
{
	unsigned int mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = POLLOUT | POLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= POLLIN | POLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}

static int cache_ioctl(struct inode *ino, struct file *filp,
		       unsigned int cmd, unsigned long arg,
		       struct cache_detail *cd)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}

static int cache_open(struct inode *inode, struct file *filp,
		      struct cache_detail *cd)
{
	struct cache_reader *rp = NULL;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp) {
			/* drop the module reference taken above */
			module_put(cd->owner);
			return -ENOMEM;
		}
		rp->offset = 0;
		rp->q.reader = 1;
		atomic_inc(&cd->readers);
		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	filp->private_data = rp;
	return 0;
}
|  | 982 |  | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 983 | static int cache_release(struct inode *inode, struct file *filp, | 
|  | 984 | struct cache_detail *cd) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 985 | { | 
|  | 986 | struct cache_reader *rp = filp->private_data; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 987 |  | 
|  | 988 | if (rp) { | 
|  | 989 | spin_lock(&queue_lock); | 
|  | 990 | if (rp->offset) { | 
|  | 991 | struct cache_queue *cq; | 
|  | 992 | for (cq = &rp->q; &cq->list != &cd->queue; | 
|  | 993 | cq = list_entry(cq->list.next, struct cache_queue, list)) | 
|  | 994 | if (!cq->reader) { | 
|  | 995 | container_of(cq, struct cache_request, q) | 
|  | 996 | ->readers--; | 
|  | 997 | break; | 
|  | 998 | } | 
|  | 999 | rp->offset = 0; | 
|  | 1000 | } | 
|  | 1001 | list_del(&rp->q.list); | 
|  | 1002 | spin_unlock(&queue_lock); | 
|  | 1003 |  | 
|  | 1004 | filp->private_data = NULL; | 
|  | 1005 | kfree(rp); | 
|  | 1006 |  | 
| NeilBrown | c5b29f8 | 2010-08-12 16:55:22 +1000 | [diff] [blame] | 1007 | cd->last_close = seconds_since_boot(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1008 | atomic_dec(&cd->readers); | 
|  | 1009 | } | 
| Trond Myklebust | f7e86ab | 2009-08-19 18:13:00 -0400 | [diff] [blame] | 1010 | module_put(cd->owner); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1011 | return 0; | 
|  | 1012 | } | 
|  | 1013 |  | 
|  | 1014 |  | 
|  | 1015 |  | 
| NeilBrown | f866a81 | 2009-08-04 15:22:38 +1000 | [diff] [blame] | 1016 | static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1017 | { | 
|  | 1018 | struct cache_queue *cq; | 
|  | 1019 | spin_lock(&queue_lock); | 
|  | 1020 | list_for_each_entry(cq, &detail->queue, list) | 
|  | 1021 | if (!cq->reader) { | 
|  | 1022 | struct cache_request *cr = container_of(cq, struct cache_request, q); | 
|  | 1023 | if (cr->item != ch) | 
|  | 1024 | continue; | 
|  | 1025 | if (cr->readers != 0) | 
| NeilBrown | 4013ede | 2006-03-27 01:15:07 -0800 | [diff] [blame] | 1026 | continue; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1027 | list_del(&cr->q.list); | 
|  | 1028 | spin_unlock(&queue_lock); | 
| NeilBrown | baab935 | 2006-03-27 01:15:09 -0800 | [diff] [blame] | 1029 | cache_put(cr->item, detail); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1030 | kfree(cr->buf); | 
|  | 1031 | kfree(cr); | 
|  | 1032 | return; | 
|  | 1033 | } | 
|  | 1034 | spin_unlock(&queue_lock); | 
|  | 1035 | } | 
|  | 1036 |  | 
|  | 1037 | /* | 
|  | 1038 | * Support routines for text-based upcalls. | 
|  | 1039 | * Fields are separated by spaces. | 
|  | 1040 | * Fields are either mangled, to quote space/tab/newline/slosh with a | 
|  | 1041 | * slosh-octal escape, or hexified with a leading \x. | 
|  | 1042 | * Each record is terminated with a newline. | 
|  | 1043 | * | 
|  | 1044 | */ | 
|  | 1045 |  | 
|  | 1046 | void qword_add(char **bpp, int *lp, char *str) | 
|  | 1047 | { | 
|  | 1048 | char *bp = *bpp; | 
|  | 1049 | int len = *lp; | 
|  | 1050 | char c; | 
|  | 1051 |  | 
|  | 1052 | if (len < 0) return; | 
|  | 1053 |  | 
|  | 1054 | while ((c = *str++) && len) | 
|  | 1055 | switch(c) { | 
|  | 1056 | case ' ': | 
|  | 1057 | case '\t': | 
|  | 1058 | case '\n': | 
|  | 1059 | case '\\': | 
|  | 1060 | if (len >= 4) { | 
|  | 1061 | *bp++ = '\\'; | 
|  | 1062 | *bp++ = '0' + ((c & 0300)>>6); | 
|  | 1063 | *bp++ = '0' + ((c & 0070)>>3); | 
|  | 1064 | *bp++ = '0' + ((c & 0007)>>0); | 
|  | 1065 | } | 
|  | 1066 | len -= 4; | 
|  | 1067 | break; | 
|  | 1068 | default: | 
|  | 1069 | *bp++ = c; | 
|  | 1070 | len--; | 
|  | 1071 | } | 
|  | 1072 | if (c || len < 1) len = -1; | 
|  | 1073 | else { | 
|  | 1074 | *bp++ = ' '; | 
|  | 1075 | len--; | 
|  | 1076 | } | 
|  | 1077 | *bpp = bp; | 
|  | 1078 | *lp = len; | 
|  | 1079 | } | 
| Trond Myklebust | 24c3767 | 2008-12-23 16:30:12 -0500 | [diff] [blame] | 1080 | EXPORT_SYMBOL_GPL(qword_add); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1081 |  | 
|  | 1082 | void qword_addhex(char **bpp, int *lp, char *buf, int blen) | 
|  | 1083 | { | 
|  | 1084 | char *bp = *bpp; | 
|  | 1085 | int len = *lp; | 
|  | 1086 |  | 
|  | 1087 | if (len < 0) return; | 
|  | 1088 |  | 
|  | 1089 | if (len > 2) { | 
|  | 1090 | *bp++ = '\\'; | 
|  | 1091 | *bp++ = 'x'; | 
|  | 1092 | len -= 2; | 
|  | 1093 | while (blen && len >= 2) { | 
|  | 1094 | unsigned char c = *buf++; | 
|  |  | /* high then low nibble: 0-9 become '0'-'9', 10-15 become 'a'-'f' */ | 
|  | 1095 | *bp++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1); | 
|  | 1096 | *bp++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1); | 
|  | 1097 | len -= 2; | 
|  | 1098 | blen--; | 
|  | 1099 | } | 
|  | 1100 | } | 
|  | 1101 | if (blen || len < 1) len = -1; | 
|  | 1102 | else { | 
|  | 1103 | *bp++ = ' '; | 
|  | 1104 | len--; | 
|  | 1105 | } | 
|  | 1106 | *bpp = bp; | 
|  | 1107 | *lp = len; | 
|  | 1108 | } | 
| Trond Myklebust | 24c3767 | 2008-12-23 16:30:12 -0500 | [diff] [blame] | 1109 | EXPORT_SYMBOL_GPL(qword_addhex); | 
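/*
 * Editor's note -- illustrative sketch, not part of the original file: how
 * a cache's request routine might combine the two helpers above to build
 * one upcall record.  The field values are invented; replacing the final
 * trailing space with '\n' follows the one-record-per-line convention
 * described above.
 */
static void example_format_request(char *buf, int buflen)
{
	char *bp = buf;
	int len = buflen;
	unsigned char raw[2] = { 0xde, 0xad };

	qword_add(&bp, &len, "a field");		/* escaped to "a\040field " */
	qword_addhex(&bp, &len, (char *)raw, 2);	/* emitted as "\xdead " */
	if (len >= 0)
		bp[-1] = '\n';	/* terminate the record */
}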
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1110 |  | 
|  | 1111 | static void warn_no_listener(struct cache_detail *detail) | 
|  | 1112 | { | 
|  | 1113 | if (detail->last_warn != detail->last_close) { | 
|  | 1114 | detail->last_warn = detail->last_close; | 
|  | 1115 | if (detail->warn_no_listener) | 
| Trond Myklebust | 2da8ca2 | 2009-08-09 15:14:26 -0400 | [diff] [blame] | 1116 | detail->warn_no_listener(detail, detail->last_close != 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1117 | } | 
|  | 1118 | } | 
|  | 1119 |  | 
| J. Bruce Fields | 0649752 | 2010-09-19 22:55:06 -0400 | [diff] [blame] | 1120 | static bool cache_listeners_exist(struct cache_detail *detail) | 
|  | 1121 | { | 
|  | 1122 | if (atomic_read(&detail->readers)) | 
|  | 1123 | return true; | 
|  | 1124 | if (detail->last_close == 0) | 
|  | 1125 | /* This cache was never opened */ | 
|  | 1126 | return false; | 
|  | 1127 | if (detail->last_close < seconds_since_boot() - 30) | 
|  | 1128 | /* | 
|  | 1129 | * We allow for the possibility that someone might | 
|  | 1130 | * restart a userspace daemon without restarting the | 
|  | 1131 | * server; but after 30 seconds, we give up. | 
|  | 1132 | */ | 
|  | 1133 | return false; | 
|  | 1134 | return true; | 
|  | 1135 | } | 
|  | 1136 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1137 | /* | 
| Trond Myklebust | bc74b4f | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1138 | * register an upcall request to user-space and queue it up for read() by the | 
|  | 1139 | * upcall daemon. | 
|  | 1140 | * | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1141 | * Each request is at most one page long. | 
|  | 1142 | */ | 
| Trond Myklebust | bc74b4f | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1143 | int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h, | 
|  | 1144 | void (*cache_request)(struct cache_detail *, | 
|  | 1145 | struct cache_head *, | 
|  | 1146 | char **, | 
|  | 1147 | int *)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1148 | { | 
|  | 1149 |  | 
|  | 1150 | char *buf; | 
|  | 1151 | struct cache_request *crq; | 
|  | 1152 | char *bp; | 
|  | 1153 | int len; | 
|  | 1154 |  | 
| J. Bruce Fields | 0649752 | 2010-09-19 22:55:06 -0400 | [diff] [blame] | 1155 | if (!cache_listeners_exist(detail)) { | 
|  | 1156 | warn_no_listener(detail); | 
|  | 1157 | return -EINVAL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1158 | } | 
|  | 1159 |  | 
|  | 1160 | buf = kmalloc(PAGE_SIZE, GFP_KERNEL); | 
|  | 1161 | if (!buf) | 
|  | 1162 | return -EAGAIN; | 
|  | 1163 |  | 
|  | 1164 | crq = kmalloc(sizeof(*crq), GFP_KERNEL); | 
|  | 1165 | if (!crq) { | 
|  | 1166 | kfree(buf); | 
|  | 1167 | return -EAGAIN; | 
|  | 1168 | } | 
|  | 1169 |  | 
|  | 1170 | bp = buf; len = PAGE_SIZE; | 
|  | 1171 |  | 
| Trond Myklebust | bc74b4f | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1172 | cache_request(detail, h, &bp, &len); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1173 |  | 
|  | 1174 | if (len < 0) { | 
|  | 1175 | kfree(buf); | 
|  | 1176 | kfree(crq); | 
|  | 1177 | return -EAGAIN; | 
|  | 1178 | } | 
|  | 1179 | crq->q.reader = 0; | 
|  | 1180 | crq->item = cache_get(h); | 
|  | 1181 | crq->buf = buf; | 
|  | 1182 | crq->len = PAGE_SIZE - len; | 
|  | 1183 | crq->readers = 0; | 
|  | 1184 | spin_lock(&queue_lock); | 
|  | 1185 | list_add_tail(&crq->q.list, &detail->queue); | 
|  | 1186 | spin_unlock(&queue_lock); | 
|  | 1187 | wake_up(&queue_wait); | 
|  | 1188 | return 0; | 
|  | 1189 | } | 
| Trond Myklebust | bc74b4f | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1190 | EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall); | 
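/*
 * Editor's note -- illustrative sketch, not part of the original file: the
 * shape of a caller.  A cache supplies a formatting callback matching the
 * cache_request parameter above and lets the helper allocate the page,
 * queue the request and wake readers.  The "demo_*" names are hypothetical.
 */
static void demo_request(struct cache_detail *cd, struct cache_head *h,
			 char **bpp, int *blen)
{
	qword_add(bpp, blen, "demo-key");	/* one field per helper call */
	(*bpp)[-1] = '\n';			/* newline-terminate the record */
}

static int demo_upcall(struct cache_detail *cd, struct cache_head *h)
{
	return sunrpc_cache_pipe_upcall(cd, h, demo_request);
}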
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1191 |  | 
|  | 1192 | /* | 
|  | 1193 | * parse a message from user-space and pass it | 
|  | 1194 | * to an appropriate cache | 
|  | 1195 | * Messages are, like requests, separated into fields by | 
|  | 1196 | * spaces and dequoted (as \xHEXSTRING or embedded \nnn octal) | 
|  | 1197 | * | 
| YOSHIFUJI Hideaki | cca5172 | 2007-02-09 15:38:13 -0800 | [diff] [blame] | 1198 | * Message is | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1199 | *   reply cachename expiry key ... content.... | 
|  | 1200 | * | 
| YOSHIFUJI Hideaki | cca5172 | 2007-02-09 15:38:13 -0800 | [diff] [blame] | 1201 | * key and content are both parsed by cache | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1202 | */ | 
|  | 1203 |  | 
|  | 1204 | #define isodigit(c) (isdigit(c) && (c) <= '7') | 
|  | 1205 | int qword_get(char **bpp, char *dest, int bufsize) | 
|  | 1206 | { | 
|  | 1207 | /* return bytes copied, or -1 on error */ | 
|  | 1208 | char *bp = *bpp; | 
|  | 1209 | int len = 0; | 
|  | 1210 |  | 
|  | 1211 | while (*bp == ' ') bp++; | 
|  | 1212 |  | 
|  | 1213 | if (bp[0] == '\\' && bp[1] == 'x') { | 
|  | 1214 | /* HEX STRING */ | 
|  | 1215 | bp += 2; | 
| Andy Shevchenko | e7f483e | 2010-09-21 09:40:25 +0300 | [diff] [blame] | 1216 | while (len < bufsize) { | 
|  | 1217 | int h, l; | 
|  | 1218 |  | 
|  | 1219 | h = hex_to_bin(bp[0]); | 
|  | 1220 | if (h < 0) | 
|  | 1221 | break; | 
|  | 1222 |  | 
|  | 1223 | l = hex_to_bin(bp[1]); | 
|  | 1224 | if (l < 0) | 
|  | 1225 | break; | 
|  | 1226 |  | 
|  | 1227 | *dest++ = (h << 4) | l; | 
|  | 1228 | bp += 2; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1229 | len++; | 
|  | 1230 | } | 
|  | 1231 | } else { | 
|  | 1232 | /* text with \nnn octal quoting */ | 
|  | 1233 | while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) { | 
|  | 1234 | if (*bp == '\\' && | 
|  | 1235 | isodigit(bp[1]) && (bp[1] <= '3') && | 
|  | 1236 | isodigit(bp[2]) && | 
|  | 1237 | isodigit(bp[3])) { | 
|  | 1238 | int byte = (*++bp -'0'); | 
|  | 1239 | bp++; | 
|  | 1240 | byte = (byte << 3) | (*bp++ - '0'); | 
|  | 1241 | byte = (byte << 3) | (*bp++ - '0'); | 
|  | 1242 | *dest++ = byte; | 
|  | 1243 | len++; | 
|  | 1244 | } else { | 
|  | 1245 | *dest++ = *bp++; | 
|  | 1246 | len++; | 
|  | 1247 | } | 
|  | 1248 | } | 
|  | 1249 | } | 
|  | 1250 |  | 
|  | 1251 | if (*bp != ' ' && *bp != '\n' && *bp != '\0') | 
|  | 1252 | return -1; | 
|  | 1253 | while (*bp == ' ') bp++; | 
|  | 1254 | *bpp = bp; | 
|  | 1255 | *dest = '\0'; | 
|  | 1256 | return len; | 
|  | 1257 | } | 
| Trond Myklebust | 24c3767 | 2008-12-23 16:30:12 -0500 | [diff] [blame] | 1258 | EXPORT_SYMBOL_GPL(qword_get); | 
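/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * walking the fields of one reply line with qword_get().  The message text
 * is invented; the "\x6b6579" field decodes to the three bytes "key".
 */
static void example_parse_line(void)
{
	char line[] = "demo \\x6b6579 3 content\n";
	char field[128];
	char *bp = line;
	int n;

	/* yields "demo", "key", "3", "content", then 0 at end of line */
	while ((n = qword_get(&bp, field, sizeof(field))) > 0)
		pr_debug("field (%d bytes): %s\n", n, field);
}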
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1259 |  | 
|  | 1260 |  | 
|  | 1261 | /* | 
|  | 1262 | * support /proc/net/rpc/$CACHENAME/content | 
|  | 1263 | * as a seqfile. | 
|  | 1264 | * We call ->cache_show passing NULL for the item to | 
|  | 1265 | * get a header, then pass each real item in the cache | 
|  | 1266 | */ | 
|  | 1267 |  | 
|  | 1268 | struct handle { | 
|  | 1269 | struct cache_detail *cd; | 
|  | 1270 | }; | 
|  | 1271 |  | 
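/*
 * Editor's note: the iterator below packs two indices into the seq_file
 * position -- the hash chain number in the upper 32 bits of *pos and the
 * entry's place within that chain in the lower 32 bits, with position 0
 * reserved for the SEQ_START_TOKEN header line.
 */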
|  | 1272 | static void *c_start(struct seq_file *m, loff_t *pos) | 
| Eric Dumazet | 9a429c4 | 2008-01-01 21:58:02 -0800 | [diff] [blame] | 1273 | __acquires(cd->hash_lock) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1274 | { | 
|  | 1275 | loff_t n = *pos; | 
|  | 1276 | unsigned hash, entry; | 
|  | 1277 | struct cache_head *ch; | 
|  | 1278 | struct cache_detail *cd = ((struct handle*)m->private)->cd; | 
| YOSHIFUJI Hideaki | cca5172 | 2007-02-09 15:38:13 -0800 | [diff] [blame] | 1279 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1280 |  | 
|  | 1281 | read_lock(&cd->hash_lock); | 
|  | 1282 | if (!n--) | 
|  | 1283 | return SEQ_START_TOKEN; | 
|  | 1284 | hash = n >> 32; | 
|  | 1285 | entry = n & ((1LL<<32) - 1); | 
|  | 1286 |  | 
|  | 1287 | for (ch=cd->hash_table[hash]; ch; ch=ch->next) | 
|  | 1288 | if (!entry--) | 
|  | 1289 | return ch; | 
|  | 1290 | n &= ~((1LL<<32) - 1); | 
|  | 1291 | do { | 
|  | 1292 | hash++; | 
|  | 1293 | n += 1LL<<32; | 
| YOSHIFUJI Hideaki | cca5172 | 2007-02-09 15:38:13 -0800 | [diff] [blame] | 1294 | } while (hash < cd->hash_size && | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1295 | cd->hash_table[hash]==NULL); | 
|  | 1296 | if (hash >= cd->hash_size) | 
|  | 1297 | return NULL; | 
|  | 1298 | *pos = n+1; | 
|  | 1299 | return cd->hash_table[hash]; | 
|  | 1300 | } | 
|  | 1301 |  | 
|  | 1302 | static void *c_next(struct seq_file *m, void *p, loff_t *pos) | 
|  | 1303 | { | 
|  | 1304 | struct cache_head *ch = p; | 
|  | 1305 | int hash = (*pos >> 32); | 
|  | 1306 | struct cache_detail *cd = ((struct handle*)m->private)->cd; | 
|  | 1307 |  | 
|  | 1308 | if (p == SEQ_START_TOKEN) | 
|  | 1309 | hash = 0; | 
|  | 1310 | else if (ch->next == NULL) { | 
|  | 1311 | hash++; | 
|  | 1312 | *pos += 1LL<<32; | 
|  | 1313 | } else { | 
|  | 1314 | ++*pos; | 
|  | 1315 | return ch->next; | 
|  | 1316 | } | 
|  | 1317 | *pos &= ~((1LL<<32) - 1); | 
|  | 1318 | while (hash < cd->hash_size && | 
|  | 1319 | cd->hash_table[hash] == NULL) { | 
|  | 1320 | hash++; | 
|  | 1321 | *pos += 1LL<<32; | 
|  | 1322 | } | 
|  | 1323 | if (hash >= cd->hash_size) | 
|  | 1324 | return NULL; | 
|  | 1325 | ++*pos; | 
|  | 1326 | return cd->hash_table[hash]; | 
|  | 1327 | } | 
|  | 1328 |  | 
|  | 1329 | static void c_stop(struct seq_file *m, void *p) | 
| Eric Dumazet | 9a429c4 | 2008-01-01 21:58:02 -0800 | [diff] [blame] | 1330 | __releases(cd->hash_lock) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1331 | { | 
|  | 1332 | struct cache_detail *cd = ((struct handle*)m->private)->cd; | 
|  | 1333 | read_unlock(&cd->hash_lock); | 
|  | 1334 | } | 
|  | 1335 |  | 
|  | 1336 | static int c_show(struct seq_file *m, void *p) | 
|  | 1337 | { | 
|  | 1338 | struct cache_head *cp = p; | 
|  | 1339 | struct cache_detail *cd = ((struct handle*)m->private)->cd; | 
|  | 1340 |  | 
|  | 1341 | if (p == SEQ_START_TOKEN) | 
|  | 1342 | return cd->cache_show(m, cd, NULL); | 
|  | 1343 |  | 
|  | 1344 | ifdebug(CACHE) | 
| NeilBrown | 4013ede | 2006-03-27 01:15:07 -0800 | [diff] [blame] | 1345 | seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n", | 
| NeilBrown | c5b29f8 | 2010-08-12 16:55:22 +1000 | [diff] [blame] | 1346 | convert_to_wallclock(cp->expiry_time), | 
|  | 1347 | atomic_read(&cp->ref.refcount), cp->flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1348 | cache_get(cp); | 
|  | 1349 | if (cache_check(cd, cp, NULL)) | 
|  | 1350 | /* cache_check does a cache_put on failure */ | 
|  | 1351 | seq_printf(m, "# "); | 
|  | 1352 | else | 
|  | 1353 | cache_put(cp, cd); | 
|  | 1354 |  | 
|  | 1355 | return cd->cache_show(m, cd, cp); | 
|  | 1356 | } | 
|  | 1357 |  | 
| Philippe De Muyter | 56b3d97 | 2007-07-10 23:07:31 -0700 | [diff] [blame] | 1358 | static const struct seq_operations cache_content_op = { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1359 | .start	= c_start, | 
|  | 1360 | .next	= c_next, | 
|  | 1361 | .stop	= c_stop, | 
|  | 1362 | .show	= c_show, | 
|  | 1363 | }; | 
|  | 1364 |  | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1365 | static int content_open(struct inode *inode, struct file *file, | 
|  | 1366 | struct cache_detail *cd) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1367 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1368 | struct handle *han; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1369 |  | 
| Trond Myklebust | f7e86ab | 2009-08-19 18:13:00 -0400 | [diff] [blame] | 1370 | if (!cd || !try_module_get(cd->owner)) | 
|  | 1371 | return -EACCES; | 
| Pavel Emelyanov | ec93103 | 2007-10-10 02:31:07 -0700 | [diff] [blame] | 1372 | han = __seq_open_private(file, &cache_content_op, sizeof(*han)); | 
| Li Zefan | a5990ea | 2010-03-11 14:08:10 -0800 | [diff] [blame] | 1373 | if (han == NULL) { | 
|  | 1374 | module_put(cd->owner); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1375 | return -ENOMEM; | 
| Li Zefan | a5990ea | 2010-03-11 14:08:10 -0800 | [diff] [blame] | 1376 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1377 |  | 
|  | 1378 | han->cd = cd; | 
| Pavel Emelyanov | ec93103 | 2007-10-10 02:31:07 -0700 | [diff] [blame] | 1379 | return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1380 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1381 |  | 
| Trond Myklebust | f7e86ab | 2009-08-19 18:13:00 -0400 | [diff] [blame] | 1382 | static int content_release(struct inode *inode, struct file *file, | 
|  | 1383 | struct cache_detail *cd) | 
|  | 1384 | { | 
|  | 1385 | int ret = seq_release_private(inode, file); | 
|  | 1386 | module_put(cd->owner); | 
|  | 1387 | return ret; | 
|  | 1388 | } | 
|  | 1389 |  | 
|  | 1390 | static int open_flush(struct inode *inode, struct file *file, | 
|  | 1391 | struct cache_detail *cd) | 
|  | 1392 | { | 
|  | 1393 | if (!cd || !try_module_get(cd->owner)) | 
|  | 1394 | return -EACCES; | 
|  | 1395 | return nonseekable_open(inode, file); | 
|  | 1396 | } | 
|  | 1397 |  | 
|  | 1398 | static int release_flush(struct inode *inode, struct file *file, | 
|  | 1399 | struct cache_detail *cd) | 
|  | 1400 | { | 
|  | 1401 | module_put(cd->owner); | 
|  | 1402 | return 0; | 
|  | 1403 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1404 |  | 
|  | 1405 | static ssize_t read_flush(struct file *file, char __user *buf, | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1406 | size_t count, loff_t *ppos, | 
|  | 1407 | struct cache_detail *cd) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1408 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1409 | char tbuf[20]; | 
|  | 1410 | unsigned long p = *ppos; | 
| Chuck Lever | 01b2969 | 2007-10-26 13:31:20 -0400 | [diff] [blame] | 1411 | size_t len; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1412 |  | 
| NeilBrown | c5b29f8 | 2010-08-12 16:55:22 +1000 | [diff] [blame] | 1413 | sprintf(tbuf, "%lu\n", convert_to_wallclock(cd->flush_time)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1414 | len = strlen(tbuf); | 
|  | 1415 | if (p >= len) | 
|  | 1416 | return 0; | 
|  | 1417 | len -= p; | 
| Chuck Lever | 01b2969 | 2007-10-26 13:31:20 -0400 | [diff] [blame] | 1418 | if (len > count) | 
|  | 1419 | len = count; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1420 | if (copy_to_user(buf, (void*)(tbuf+p), len)) | 
| Chuck Lever | 01b2969 | 2007-10-26 13:31:20 -0400 | [diff] [blame] | 1421 | return -EFAULT; | 
|  | 1422 | *ppos += len; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1423 | return len; | 
|  | 1424 | } | 
|  | 1425 |  | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1426 | static ssize_t write_flush(struct file *file, const char __user *buf, | 
|  | 1427 | size_t count, loff_t *ppos, | 
|  | 1428 | struct cache_detail *cd) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1429 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1430 | char tbuf[20]; | 
| NeilBrown | c5b29f8 | 2010-08-12 16:55:22 +1000 | [diff] [blame] | 1431 | char *bp, *ep; | 
|  | 1432 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1433 | if (*ppos || count > sizeof(tbuf)-1) | 
|  | 1434 | return -EINVAL; | 
|  | 1435 | if (copy_from_user(tbuf, buf, count)) | 
|  | 1436 | return -EFAULT; | 
|  | 1437 | tbuf[count] = 0; | 
| NeilBrown | c5b29f8 | 2010-08-12 16:55:22 +1000 | [diff] [blame] | 1438 | simple_strtoul(tbuf, &ep, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1439 | if (*ep && *ep != '\n') | 
|  | 1440 | return -EINVAL; | 
|  | 1441 |  | 
| NeilBrown | c5b29f8 | 2010-08-12 16:55:22 +1000 | [diff] [blame] | 1442 | bp = tbuf; | 
|  | 1443 | cd->flush_time = get_expiry(&bp); | 
|  | 1444 | cd->nextcheck = seconds_since_boot(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1445 | cache_flush(); | 
|  | 1446 |  | 
|  | 1447 | *ppos += count; | 
|  | 1448 | return count; | 
|  | 1449 | } | 
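/*
 * Editor's note -- illustrative userspace sketch, not part of the original
 * file: flushing a cache by writing a wallclock time (seconds since the
 * epoch) to its "flush" file; entries last refreshed before that time are
 * treated as expired on the next lookup.  The cache name in the path is an
 * assumption for the example.
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
	FILE *f = fopen("/proc/net/rpc/auth.unix.ip/flush", "w");

	if (!f)
		return 1;
	/* writing "now" invalidates everything cached so far */
	fprintf(f, "%ld\n", (long)time(NULL));
	fclose(f);
	return 0;
}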
|  | 1450 |  | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1451 | static ssize_t cache_read_procfs(struct file *filp, char __user *buf, | 
|  | 1452 | size_t count, loff_t *ppos) | 
|  | 1453 | { | 
|  | 1454 | struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data; | 
|  | 1455 |  | 
|  | 1456 | return cache_read(filp, buf, count, ppos, cd); | 
|  | 1457 | } | 
|  | 1458 |  | 
|  | 1459 | static ssize_t cache_write_procfs(struct file *filp, const char __user *buf, | 
|  | 1460 | size_t count, loff_t *ppos) | 
|  | 1461 | { | 
|  | 1462 | struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data; | 
|  | 1463 |  | 
|  | 1464 | return cache_write(filp, buf, count, ppos, cd); | 
|  | 1465 | } | 
|  | 1466 |  | 
|  | 1467 | static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait) | 
|  | 1468 | { | 
|  | 1469 | struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data; | 
|  | 1470 |  | 
|  | 1471 | return cache_poll(filp, wait, cd); | 
|  | 1472 | } | 
|  | 1473 |  | 
| Frederic Weisbecker | d79b6f4 | 2010-03-30 07:27:50 +0200 | [diff] [blame] | 1474 | static long cache_ioctl_procfs(struct file *filp, | 
|  | 1475 | unsigned int cmd, unsigned long arg) | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1476 | { | 
| Frederic Weisbecker | d79b6f4 | 2010-03-30 07:27:50 +0200 | [diff] [blame] | 1477 | struct inode *inode = filp->f_path.dentry->d_inode; | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1478 | struct cache_detail *cd = PDE(inode)->data; | 
|  | 1479 |  | 
| Arnd Bergmann | a6f8dbc | 2010-10-04 21:18:23 +0200 | [diff] [blame] | 1480 | return cache_ioctl(inode, filp, cmd, arg, cd); | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1481 | } | 
|  | 1482 |  | 
|  | 1483 | static int cache_open_procfs(struct inode *inode, struct file *filp) | 
|  | 1484 | { | 
|  | 1485 | struct cache_detail *cd = PDE(inode)->data; | 
|  | 1486 |  | 
|  | 1487 | return cache_open(inode, filp, cd); | 
|  | 1488 | } | 
|  | 1489 |  | 
|  | 1490 | static int cache_release_procfs(struct inode *inode, struct file *filp) | 
|  | 1491 | { | 
|  | 1492 | struct cache_detail *cd = PDE(inode)->data; | 
|  | 1493 |  | 
|  | 1494 | return cache_release(inode, filp, cd); | 
|  | 1495 | } | 
|  | 1496 |  | 
|  | 1497 | static const struct file_operations cache_file_operations_procfs = { | 
|  | 1498 | .owner		= THIS_MODULE, | 
|  | 1499 | .llseek		= no_llseek, | 
|  | 1500 | .read		= cache_read_procfs, | 
|  | 1501 | .write		= cache_write_procfs, | 
|  | 1502 | .poll		= cache_poll_procfs, | 
| Frederic Weisbecker | d79b6f4 | 2010-03-30 07:27:50 +0200 | [diff] [blame] | 1503 | .unlocked_ioctl	= cache_ioctl_procfs, /* for FIONREAD */ | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1504 | .open		= cache_open_procfs, | 
|  | 1505 | .release	= cache_release_procfs, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1506 | }; | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1507 |  | 
|  | 1508 | static int content_open_procfs(struct inode *inode, struct file *filp) | 
|  | 1509 | { | 
|  | 1510 | struct cache_detail *cd = PDE(inode)->data; | 
|  | 1511 |  | 
|  | 1512 | return content_open(inode, filp, cd); | 
|  | 1513 | } | 
|  | 1514 |  | 
| Trond Myklebust | f7e86ab | 2009-08-19 18:13:00 -0400 | [diff] [blame] | 1515 | static int content_release_procfs(struct inode *inode, struct file *filp) | 
|  | 1516 | { | 
|  | 1517 | struct cache_detail *cd = PDE(inode)->data; | 
|  | 1518 |  | 
|  | 1519 | return content_release(inode, filp, cd); | 
|  | 1520 | } | 
|  | 1521 |  | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1522 | static const struct file_operations content_file_operations_procfs = { | 
|  | 1523 | .open		= content_open_procfs, | 
|  | 1524 | .read		= seq_read, | 
|  | 1525 | .llseek		= seq_lseek, | 
| Trond Myklebust | f7e86ab | 2009-08-19 18:13:00 -0400 | [diff] [blame] | 1526 | .release	= content_release_procfs, | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1527 | }; | 
|  | 1528 |  | 
| Trond Myklebust | f7e86ab | 2009-08-19 18:13:00 -0400 | [diff] [blame] | 1529 | static int open_flush_procfs(struct inode *inode, struct file *filp) | 
|  | 1530 | { | 
|  | 1531 | struct cache_detail *cd = PDE(inode)->data; | 
|  | 1532 |  | 
|  | 1533 | return open_flush(inode, filp, cd); | 
|  | 1534 | } | 
|  | 1535 |  | 
|  | 1536 | static int release_flush_procfs(struct inode *inode, struct file *filp) | 
|  | 1537 | { | 
|  | 1538 | struct cache_detail *cd = PDE(inode)->data; | 
|  | 1539 |  | 
|  | 1540 | return release_flush(inode, filp, cd); | 
|  | 1541 | } | 
|  | 1542 |  | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1543 | static ssize_t read_flush_procfs(struct file *filp, char __user *buf, | 
|  | 1544 | size_t count, loff_t *ppos) | 
|  | 1545 | { | 
|  | 1546 | struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data; | 
|  | 1547 |  | 
|  | 1548 | return read_flush(filp, buf, count, ppos, cd); | 
|  | 1549 | } | 
|  | 1550 |  | 
|  | 1551 | static ssize_t write_flush_procfs(struct file *filp, | 
|  | 1552 | const char __user *buf, | 
|  | 1553 | size_t count, loff_t *ppos) | 
|  | 1554 | { | 
|  | 1555 | struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data; | 
|  | 1556 |  | 
|  | 1557 | return write_flush(filp, buf, count, ppos, cd); | 
|  | 1558 | } | 
|  | 1559 |  | 
|  | 1560 | static const struct file_operations cache_flush_operations_procfs = { | 
| Trond Myklebust | f7e86ab | 2009-08-19 18:13:00 -0400 | [diff] [blame] | 1561 | .open		= open_flush_procfs, | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1562 | .read		= read_flush_procfs, | 
|  | 1563 | .write		= write_flush_procfs, | 
| Trond Myklebust | f7e86ab | 2009-08-19 18:13:00 -0400 | [diff] [blame] | 1564 | .release	= release_flush_procfs, | 
| Arnd Bergmann | 6038f37 | 2010-08-15 18:52:59 +0200 | [diff] [blame] | 1565 | .llseek		= no_llseek, | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1566 | }; | 
|  | 1567 |  | 
| Pavel Emelyanov | 593ce16 | 2010-09-27 14:00:15 +0400 | [diff] [blame] | 1568 | static void remove_cache_proc_entries(struct cache_detail *cd, struct net *net) | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1569 | { | 
| Pavel Emelyanov | 4f42d0d | 2010-09-27 14:01:58 +0400 | [diff] [blame] | 1570 | struct sunrpc_net *sn; | 
|  | 1571 |  | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1572 | if (cd->u.procfs.proc_ent == NULL) | 
|  | 1573 | return; | 
|  | 1574 | if (cd->u.procfs.flush_ent) | 
|  | 1575 | remove_proc_entry("flush", cd->u.procfs.proc_ent); | 
|  | 1576 | if (cd->u.procfs.channel_ent) | 
|  | 1577 | remove_proc_entry("channel", cd->u.procfs.proc_ent); | 
|  | 1578 | if (cd->u.procfs.content_ent) | 
|  | 1579 | remove_proc_entry("content", cd->u.procfs.proc_ent); | 
|  | 1580 | cd->u.procfs.proc_ent = NULL; | 
| Pavel Emelyanov | 4f42d0d | 2010-09-27 14:01:58 +0400 | [diff] [blame] | 1581 | sn = net_generic(net, sunrpc_net_id); | 
|  | 1582 | remove_proc_entry(cd->name, sn->proc_net_rpc); | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1583 | } | 
|  | 1584 |  | 
|  | 1585 | #ifdef CONFIG_PROC_FS | 
| Pavel Emelyanov | 593ce16 | 2010-09-27 14:00:15 +0400 | [diff] [blame] | 1586 | static int create_cache_proc_entries(struct cache_detail *cd, struct net *net) | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1587 | { | 
|  | 1588 | struct proc_dir_entry *p; | 
| Pavel Emelyanov | 4f42d0d | 2010-09-27 14:01:58 +0400 | [diff] [blame] | 1589 | struct sunrpc_net *sn; | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1590 |  | 
| Pavel Emelyanov | 4f42d0d | 2010-09-27 14:01:58 +0400 | [diff] [blame] | 1591 | sn = net_generic(net, sunrpc_net_id); | 
|  | 1592 | cd->u.procfs.proc_ent = proc_mkdir(cd->name, sn->proc_net_rpc); | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1593 | if (cd->u.procfs.proc_ent == NULL) | 
|  | 1594 | goto out_nomem; | 
|  | 1595 | cd->u.procfs.channel_ent = NULL; | 
|  | 1596 | cd->u.procfs.content_ent = NULL; | 
|  | 1597 |  | 
|  | 1598 | p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR, | 
|  | 1599 | cd->u.procfs.proc_ent, | 
|  | 1600 | &cache_flush_operations_procfs, cd); | 
|  | 1601 | cd->u.procfs.flush_ent = p; | 
|  | 1602 | if (p == NULL) | 
|  | 1603 | goto out_nomem; | 
|  | 1604 |  | 
|  | 1605 | if (cd->cache_upcall || cd->cache_parse) { | 
|  | 1606 | p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR, | 
|  | 1607 | cd->u.procfs.proc_ent, | 
|  | 1608 | &cache_file_operations_procfs, cd); | 
|  | 1609 | cd->u.procfs.channel_ent = p; | 
|  | 1610 | if (p == NULL) | 
|  | 1611 | goto out_nomem; | 
|  | 1612 | } | 
|  | 1613 | if (cd->cache_show) { | 
|  | 1614 | p = proc_create_data("content", S_IFREG|S_IRUSR|S_IWUSR, | 
|  | 1615 | cd->u.procfs.proc_ent, | 
|  | 1616 | &content_file_operations_procfs, cd); | 
|  | 1617 | cd->u.procfs.content_ent = p; | 
|  | 1618 | if (p == NULL) | 
|  | 1619 | goto out_nomem; | 
|  | 1620 | } | 
|  | 1621 | return 0; | 
|  | 1622 | out_nomem: | 
| Pavel Emelyanov | 593ce16 | 2010-09-27 14:00:15 +0400 | [diff] [blame] | 1623 | remove_cache_proc_entries(cd, net); | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1624 | return -ENOMEM; | 
|  | 1625 | } | 
|  | 1626 | #else /* CONFIG_PROC_FS */ | 
| Pavel Emelyanov | 593ce16 | 2010-09-27 14:00:15 +0400 | [diff] [blame] | 1627 | static int create_cache_proc_entries(struct cache_detail *cd, struct net *net) | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1628 | { | 
|  | 1629 | return 0; | 
|  | 1630 | } | 
|  | 1631 | #endif | 
|  | 1632 |  | 
| Artem Bityutskiy | 8eab945 | 2010-07-01 18:05:56 +0300 | [diff] [blame] | 1633 | void __init cache_initialize(void) | 
|  | 1634 | { | 
|  | 1635 | INIT_DELAYED_WORK_DEFERRABLE(&cache_cleaner, do_cache_clean); | 
|  | 1636 | } | 
|  | 1637 |  | 
| Pavel Emelyanov | 593ce16 | 2010-09-27 14:00:15 +0400 | [diff] [blame] | 1638 | int cache_register_net(struct cache_detail *cd, struct net *net) | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1639 | { | 
|  | 1640 | int ret; | 
|  | 1641 |  | 
|  | 1642 | sunrpc_init_cache_detail(cd); | 
| Pavel Emelyanov | 593ce16 | 2010-09-27 14:00:15 +0400 | [diff] [blame] | 1643 | ret = create_cache_proc_entries(cd, net); | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1644 | if (ret) | 
|  | 1645 | sunrpc_destroy_cache_detail(cd); | 
|  | 1646 | return ret; | 
|  | 1647 | } | 
| Stanislav Kinsbursky | f5c8593b | 2011-12-07 12:57:56 +0300 | [diff] [blame] | 1648 | EXPORT_SYMBOL_GPL(cache_register_net); | 
| Pavel Emelyanov | 593ce16 | 2010-09-27 14:00:15 +0400 | [diff] [blame] | 1649 |  | 
| Pavel Emelyanov | 593ce16 | 2010-09-27 14:00:15 +0400 | [diff] [blame] | 1650 | void cache_unregister_net(struct cache_detail *cd, struct net *net) | 
|  | 1651 | { | 
|  | 1652 | remove_cache_proc_entries(cd, net); | 
|  | 1653 | sunrpc_destroy_cache_detail(cd); | 
|  | 1654 | } | 
| Stanislav Kinsbursky | f5c8593b | 2011-12-07 12:57:56 +0300 | [diff] [blame] | 1655 | EXPORT_SYMBOL_GPL(cache_unregister_net); | 
| Pavel Emelyanov | 593ce16 | 2010-09-27 14:00:15 +0400 | [diff] [blame] | 1656 |  | 
| Stanislav Kinsbursky | 0a402d5 | 2012-01-19 21:42:21 +0400 | [diff] [blame] | 1657 | struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net) | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1658 | { | 
| Stanislav Kinsbursky | 0a402d5 | 2012-01-19 21:42:21 +0400 | [diff] [blame] | 1659 | struct cache_detail *cd; | 
|  | 1660 |  | 
|  | 1661 | cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL); | 
|  | 1662 | if (cd == NULL) | 
|  | 1663 | return ERR_PTR(-ENOMEM); | 
|  | 1664 |  | 
|  | 1665 | cd->hash_table = kzalloc(cd->hash_size * sizeof(struct cache_head *), | 
|  | 1666 | GFP_KERNEL); | 
|  | 1667 | if (cd->hash_table == NULL) { | 
|  | 1668 | kfree(cd); | 
|  | 1669 | return ERR_PTR(-ENOMEM); | 
|  | 1670 | } | 
|  | 1671 | cd->net = net; | 
|  | 1672 | return cd; | 
| Trond Myklebust | 173912a | 2009-08-09 15:14:29 -0400 | [diff] [blame] | 1673 | } | 
| Stanislav Kinsbursky | 0a402d5 | 2012-01-19 21:42:21 +0400 | [diff] [blame] | 1674 | EXPORT_SYMBOL_GPL(cache_create_net); | 
|  | 1675 |  | 
|  | 1676 | void cache_destroy_net(struct cache_detail *cd, struct net *net) | 
|  | 1677 | { | 
|  | 1678 | kfree(cd->hash_table); | 
|  | 1679 | kfree(cd); | 
|  | 1680 | } | 
|  | 1681 | EXPORT_SYMBOL_GPL(cache_destroy_net); | 
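/*
 * Editor's note -- illustrative sketch, not part of the original file: the
 * intended pairing of the per-net helpers above.  A static template
 * supplies the name, hash size and method pointers; cache_create_net()
 * duplicates it for one namespace and cache_register_net() adds the proc
 * files.  "demo_cache_template" and the init function are hypothetical.
 */
static struct cache_detail demo_cache_template = {
	.owner		= THIS_MODULE,
	.hash_size	= 64,
	.name		= "demo_cache",
};

static int demo_cache_init_net(struct net *net)
{
	struct cache_detail *cd;
	int err;

	cd = cache_create_net(&demo_cache_template, net);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	err = cache_register_net(cd, net);
	if (err) {
		cache_destroy_net(cd, net);
		return err;
	}
	return 0;
}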
| Trond Myklebust | 8854e82 | 2009-08-09 15:14:30 -0400 | [diff] [blame] | 1682 |  | 
|  | 1683 | static ssize_t cache_read_pipefs(struct file *filp, char __user *buf, | 
|  | 1684 | size_t count, loff_t *ppos) | 
|  | 1685 | { | 
|  | 1686 | struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private; | 
|  | 1687 |  | 
|  | 1688 | return cache_read(filp, buf, count, ppos, cd); | 
|  | 1689 | } | 
|  | 1690 |  | 
|  | 1691 | static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf, | 
|  | 1692 | size_t count, loff_t *ppos) | 
|  | 1693 | { | 
|  | 1694 | struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private; | 
|  | 1695 |  | 
|  | 1696 | return cache_write(filp, buf, count, ppos, cd); | 
|  | 1697 | } | 
|  | 1698 |  | 
|  | 1699 | static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait) | 
|  | 1700 | { | 
|  | 1701 | struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private; | 
|  | 1702 |  | 
|  | 1703 | return cache_poll(filp, wait, cd); | 
|  | 1704 | } | 
|  | 1705 |  | 
| Frederic Weisbecker | 9918ff2 | 2010-05-19 15:08:17 +0200 | [diff] [blame] | 1706 | static long cache_ioctl_pipefs(struct file *filp, | 
| Trond Myklebust | 8854e82 | 2009-08-09 15:14:30 -0400 | [diff] [blame] | 1707 | unsigned int cmd, unsigned long arg) | 
|  | 1708 | { | 
| Frederic Weisbecker | 9918ff2 | 2010-05-19 15:08:17 +0200 | [diff] [blame] | 1709 | struct inode *inode = filp->f_dentry->d_inode; | 
| Trond Myklebust | 8854e82 | 2009-08-09 15:14:30 -0400 | [diff] [blame] | 1710 | struct cache_detail *cd = RPC_I(inode)->private; | 
|  | 1711 |  | 
| Arnd Bergmann | a6f8dbc | 2010-10-04 21:18:23 +0200 | [diff] [blame] | 1712 | return cache_ioctl(inode, filp, cmd, arg, cd); | 
| Trond Myklebust | 8854e82 | 2009-08-09 15:14:30 -0400 | [diff] [blame] | 1713 | } | 
|  | 1714 |  | 
|  | 1715 | static int cache_open_pipefs(struct inode *inode, struct file *filp) | 
|  | 1716 | { | 
|  | 1717 | struct cache_detail *cd = RPC_I(inode)->private; | 
|  | 1718 |  | 
|  | 1719 | return cache_open(inode, filp, cd); | 
|  | 1720 | } | 
|  | 1721 |  | 
|  | 1722 | static int cache_release_pipefs(struct inode *inode, struct file *filp) | 
|  | 1723 | { | 
|  | 1724 | struct cache_detail *cd = RPC_I(inode)->private; | 
|  | 1725 |  | 
|  | 1726 | return cache_release(inode, filp, cd); | 
|  | 1727 | } | 
|  | 1728 |  | 
|  | 1729 | const struct file_operations cache_file_operations_pipefs = { | 
|  | 1730 | .owner		= THIS_MODULE, | 
|  | 1731 | .llseek		= no_llseek, | 
|  | 1732 | .read		= cache_read_pipefs, | 
|  | 1733 | .write		= cache_write_pipefs, | 
|  | 1734 | .poll		= cache_poll_pipefs, | 
| Frederic Weisbecker | 9918ff2 | 2010-05-19 15:08:17 +0200 | [diff] [blame] | 1735 | .unlocked_ioctl	= cache_ioctl_pipefs, /* for FIONREAD */ | 
| Trond Myklebust | 8854e82 | 2009-08-09 15:14:30 -0400 | [diff] [blame] | 1736 | .open		= cache_open_pipefs, | 
|  | 1737 | .release	= cache_release_pipefs, | 
|  | 1738 | }; | 
|  | 1739 |  | 
|  | 1740 | static int content_open_pipefs(struct inode *inode, struct file *filp) | 
|  | 1741 | { | 
|  | 1742 | struct cache_detail *cd = RPC_I(inode)->private; | 
|  | 1743 |  | 
|  | 1744 | return content_open(inode, filp, cd); | 
|  | 1745 | } | 
|  | 1746 |  | 
| Trond Myklebust | f7e86ab | 2009-08-19 18:13:00 -0400 | [diff] [blame] | 1747 | static int content_release_pipefs(struct inode *inode, struct file *filp) | 
|  | 1748 | { | 
|  | 1749 | struct cache_detail *cd = RPC_I(inode)->private; | 
|  | 1750 |  | 
|  | 1751 | return content_release(inode, filp, cd); | 
|  | 1752 | } | 
|  | 1753 |  | 
| Trond Myklebust | 8854e82 | 2009-08-09 15:14:30 -0400 | [diff] [blame] | 1754 | const struct file_operations content_file_operations_pipefs = { | 
|  | 1755 | .open		= content_open_pipefs, | 
|  | 1756 | .read		= seq_read, | 
|  | 1757 | .llseek		= seq_lseek, | 
| Trond Myklebust | f7e86ab | 2009-08-19 18:13:00 -0400 | [diff] [blame] | 1758 | .release	= content_release_pipefs, | 
| Trond Myklebust | 8854e82 | 2009-08-09 15:14:30 -0400 | [diff] [blame] | 1759 | }; | 
|  | 1760 |  | 
| Trond Myklebust | f7e86ab | 2009-08-19 18:13:00 -0400 | [diff] [blame] | 1761 | static int open_flush_pipefs(struct inode *inode, struct file *filp) | 
|  | 1762 | { | 
|  | 1763 | struct cache_detail *cd = RPC_I(inode)->private; | 
|  | 1764 |  | 
|  | 1765 | return open_flush(inode, filp, cd); | 
|  | 1766 | } | 
|  | 1767 |  | 
|  | 1768 | static int release_flush_pipefs(struct inode *inode, struct file *filp) | 
|  | 1769 | { | 
|  | 1770 | struct cache_detail *cd = RPC_I(inode)->private; | 
|  | 1771 |  | 
|  | 1772 | return release_flush(inode, filp, cd); | 
|  | 1773 | } | 
|  | 1774 |  | 
| Trond Myklebust | 8854e82 | 2009-08-09 15:14:30 -0400 | [diff] [blame] | 1775 | static ssize_t read_flush_pipefs(struct file *filp, char __user *buf, | 
|  | 1776 | size_t count, loff_t *ppos) | 
|  | 1777 | { | 
|  | 1778 | struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private; | 
|  | 1779 |  | 
|  | 1780 | return read_flush(filp, buf, count, ppos, cd); | 
|  | 1781 | } | 
|  | 1782 |  | 
|  | 1783 | static ssize_t write_flush_pipefs(struct file *filp, | 
|  | 1784 | const char __user *buf, | 
|  | 1785 | size_t count, loff_t *ppos) | 
|  | 1786 | { | 
|  | 1787 | struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private; | 
|  | 1788 |  | 
|  | 1789 | return write_flush(filp, buf, count, ppos, cd); | 
|  | 1790 | } | 
|  | 1791 |  | 
|  | 1792 | const struct file_operations cache_flush_operations_pipefs = { | 
| Trond Myklebust | f7e86ab | 2009-08-19 18:13:00 -0400 | [diff] [blame] | 1793 | .open		= open_flush_pipefs, | 
| Trond Myklebust | 8854e82 | 2009-08-09 15:14:30 -0400 | [diff] [blame] | 1794 | .read		= read_flush_pipefs, | 
|  | 1795 | .write		= write_flush_pipefs, | 
| Trond Myklebust | f7e86ab | 2009-08-19 18:13:00 -0400 | [diff] [blame] | 1796 | .release	= release_flush_pipefs, | 
| Arnd Bergmann | 6038f37 | 2010-08-15 18:52:59 +0200 | [diff] [blame] | 1797 | .llseek		= no_llseek, | 
| Trond Myklebust | 8854e82 | 2009-08-09 15:14:30 -0400 | [diff] [blame] | 1798 | }; | 
|  | 1799 |  | 
|  | 1800 | int sunrpc_cache_register_pipefs(struct dentry *parent, | 
| Al Viro | 64f1426 | 2011-07-25 00:35:13 -0400 | [diff] [blame] | 1801 | const char *name, umode_t umode, | 
| Trond Myklebust | 8854e82 | 2009-08-09 15:14:30 -0400 | [diff] [blame] | 1802 | struct cache_detail *cd) | 
|  | 1803 | { | 
|  | 1804 | struct qstr q; | 
|  | 1805 | struct dentry *dir; | 
|  | 1806 | int ret = 0; | 
|  | 1807 |  | 
| Trond Myklebust | 8854e82 | 2009-08-09 15:14:30 -0400 | [diff] [blame] | 1808 | q.name = name; | 
|  | 1809 | q.len = strlen(name); | 
|  | 1810 | q.hash = full_name_hash(q.name, q.len); | 
|  | 1811 | dir = rpc_create_cache_dir(parent, &q, umode, cd); | 
|  | 1812 | if (!IS_ERR(dir)) | 
|  | 1813 | cd->u.pipefs.dir = dir; | 
| Stanislav Kinsbursky | 820f944 | 2011-11-25 17:12:40 +0300 | [diff] [blame] | 1814 | else | 
| Trond Myklebust | 8854e82 | 2009-08-09 15:14:30 -0400 | [diff] [blame] | 1815 | ret = PTR_ERR(dir); | 
| Trond Myklebust | 8854e82 | 2009-08-09 15:14:30 -0400 | [diff] [blame] | 1816 | return ret; | 
|  | 1817 | } | 
|  | 1818 | EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs); | 
|  | 1819 |  | 
|  | 1820 | void sunrpc_cache_unregister_pipefs(struct cache_detail *cd) | 
|  | 1821 | { | 
|  | 1822 | rpc_remove_cache_dir(cd->u.pipefs.dir); | 
|  | 1823 | cd->u.pipefs.dir = NULL; | 
| Trond Myklebust | 8854e82 | 2009-08-09 15:14:30 -0400 | [diff] [blame] | 1824 | } | 
|  | 1825 | EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs); | 
|  | 1826 |  |