/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE

#define HASHSIZE		64

static struct hlist_head	*cache_hash;
static struct list_head 	lru_head;
static struct kmem_cache	*drc_slab;

/* max number of entries allowed in the cache */
static unsigned int		max_drc_entries;

/*
 * Stats and other tracking of the duplicate reply cache. All of these and
 * the "rc" fields in nfsdstats are protected by the cache_lock.
 */

/* total number of entries */
static unsigned int		num_drc_entries;

/* cache misses due only to checksum comparison failures */
static unsigned int		payload_misses;

/* amount of memory (in bytes) currently consumed by the DRC */
static unsigned int		drc_mem_usage;

/* longest hash chain seen */
static unsigned int		longest_chain;

/* size of cache when we saw the longest hash chain */
static unsigned int		longest_chain_cachesize;

/*
 * Calculate the hash index from an XID.
 */
static inline u32 request_hash(u32 xid)
{
	u32 h = xid;
	h ^= (xid >> 24);
	return h & (HASHSIZE-1);
}

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static void	cache_cleaner_func(struct work_struct *unused);
static int	nfsd_reply_cache_shrink(struct shrinker *shrink,
					struct shrink_control *sc);

struct shrinker nfsd_reply_cache_shrinker = {
	.shrink	= nfsd_reply_cache_shrink,
	.seeks	= 1,
};

/*
 * locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG
 * Otherwise, when accessing _prev or _next, the lock must be held.
 */
75static DEFINE_SPINLOCK(cache_lock);
Jeff Laytonaca8a232013-02-04 08:18:05 -050076static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);
Linus Torvalds1da177e2005-04-16 15:20:36 -070077
Jeff Layton0338dd12013-02-04 08:18:02 -050078/*
79 * Put a cap on the size of the DRC based on the amount of available
80 * low memory in the machine.
81 *
82 * 64MB: 8192
83 * 128MB: 11585
84 * 256MB: 16384
85 * 512MB: 23170
86 * 1GB: 32768
87 * 2GB: 46340
88 * 4GB: 65536
89 * 8GB: 92681
90 * 16GB: 131072
91 *
92 * ...with a hard cap of 256k entries. In the worst case, each entry will be
93 * ~1k, so the above numbers should give a rough max of the amount of memory
94 * used in k.
95 */
96static unsigned int
97nfsd_cache_size_limit(void)
98{
99 unsigned int limit;
100 unsigned long low_pages = totalram_pages - totalhigh_pages;
101
102 limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
103 return min_t(unsigned int, limit, 256*1024);
104}
105
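/*
 * Allocate a cache entry from the slab and initialize its list and hash
 * linkage. Returns NULL if the allocation fails.
 */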
static struct svc_cacherep *
nfsd_reply_cache_alloc(void)
{
	struct svc_cacherep	*rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_LIST_HEAD(&rp->c_lru);
		INIT_HLIST_NODE(&rp->c_hash);
	}
	return rp;
}

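/*
 * Free a cache entry and any reply buffer it owns, updating the entry
 * count and memory-usage accounting. The caller must hold cache_lock.
 */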
static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
	}
	if (!hlist_unhashed(&rp->c_hash))
		hlist_del(&rp->c_hash);
	list_del(&rp->c_lru);
	--num_drc_entries;
	drc_mem_usage -= sizeof(*rp);
	kmem_cache_free(drc_slab, rp);
}

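/*
 * Wrapper around nfsd_reply_cache_free_locked() for callers that do not
 * already hold cache_lock.
 */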
static void
nfsd_reply_cache_free(struct svc_cacherep *rp)
{
	spin_lock(&cache_lock);
	nfsd_reply_cache_free_locked(rp);
	spin_unlock(&cache_lock);
}

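/*
 * Set up the reply cache at nfsd startup: compute the entry limit,
 * register the shrinker, and allocate the slab cache and hash table.
 * Returns 0 on success or -ENOMEM on failure.
 */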
int nfsd_reply_cache_init(void)
{
	INIT_LIST_HEAD(&lru_head);
	max_drc_entries = nfsd_cache_size_limit();
	num_drc_entries = 0;

	register_shrinker(&nfsd_reply_cache_shrinker);
	drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
					0, 0, NULL);
	if (!drc_slab)
		goto out_nomem;

	cache_hash = kcalloc(HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
	if (!cache_hash)
		goto out_nomem;

	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}

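/*
 * Tear down the reply cache: unregister the shrinker, cancel the cleaner
 * job, and free every entry along with the hash table and slab cache.
 */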
void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep	*rp;

	unregister_shrinker(&nfsd_reply_cache_shrinker);
	cancel_delayed_work_sync(&cache_cleaner);

	while (!list_empty(&lru_head)) {
		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
		nfsd_reply_cache_free_locked(rp);
	}

	kfree(cache_hash);
	cache_hash = NULL;

	if (drc_slab) {
		kmem_cache_destroy(drc_slab);
		drc_slab = NULL;
	}
}

/*
 * Move cache entry to end of LRU list, and queue the cleaner to run if it's
 * not already scheduled.
 */
static void
lru_put_end(struct svc_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &lru_head);
	schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
}

/*
 * Move a cache entry from one hash list to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
	hlist_del_init(&rp->c_hash);
	hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid));
}

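/*
 * An entry is expired (and may be reclaimed) once it is no longer in
 * progress and has not been touched for RC_EXPIRE jiffies.
 */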
static inline bool
nfsd_cache_entry_expired(struct svc_cacherep *rp)
{
	return rp->c_state != RC_INPROG &&
	       time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static void
prune_cache_entries(void)
{
	struct svc_cacherep *rp, *tmp;

	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
		if (!nfsd_cache_entry_expired(rp) &&
		    num_drc_entries <= max_drc_entries)
			break;
		nfsd_reply_cache_free_locked(rp);
	}

	/*
	 * Conditionally rearm the job. If we cleaned out the list, then
	 * cancel any pending run (since there won't be any work to do).
	 * Otherwise, we rearm the job or modify the existing one to run in
	 * RC_EXPIRE since we just ran the pruner.
	 */
	if (list_empty(&lru_head))
		cancel_delayed_work(&cache_cleaner);
	else
		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
}

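/*
 * Delayed-work callback: take cache_lock and prune old entries.
 */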
static void
cache_cleaner_func(struct work_struct *unused)
{
	spin_lock(&cache_lock);
	prune_cache_entries();
	spin_unlock(&cache_lock);
}

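/*
 * Shrinker callback: prune entries when asked to scan, and report the
 * number of entries that remain so the VM can gauge the cache's size.
 */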
static int
nfsd_reply_cache_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned int num;

	spin_lock(&cache_lock);
	if (sc->nr_to_scan)
		prune_cache_entries();
	num = num_drc_entries;
	spin_unlock(&cache_lock);

	return num;
}

/*
 * Walk an xdr_buf and get a checksum of at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;
	__wsum csum;
	struct xdr_buf *buf = &rqstp->rq_arg;
	const unsigned char *p = buf->head[0].iov_base;
	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
				RC_CSUMLEN);
	size_t len = min(buf->head[0].iov_len, csum_len);

	/* rq_arg.head first */
	csum = csum_partial(p, len, 0);
	csum_len -= len;

	/* Continue into page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
	while (csum_len) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(size_t, PAGE_SIZE - base, csum_len);
		csum = csum_partial(p, len, csum);
		csum_len -= len;
		base = 0;
		++idx;
	}
	return csum;
}

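/*
 * Return true if the request matches the cache entry: the RPC header
 * fields and source address must match, and so must the checksum of the
 * NFS payload. A checksum-only mismatch is tallied in payload_misses.
 */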
static bool
nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
{
	/* Check RPC header info first */
	if (rqstp->rq_xid != rp->c_xid || rqstp->rq_proc != rp->c_proc ||
	    rqstp->rq_prot != rp->c_prot || rqstp->rq_vers != rp->c_vers ||
	    rqstp->rq_arg.len != rp->c_len ||
	    !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
	    rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
		return false;

	/* compare checksum of NFS data */
	if (csum != rp->c_csum) {
		++payload_misses;
		return false;
	}

	return true;
}

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry or
 * NULL on failure.
 */
static struct svc_cacherep *
nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum)
{
	struct svc_cacherep	*rp, *ret = NULL;
	struct hlist_head 	*rh;
	unsigned int		entries = 0;

	rh = &cache_hash[request_hash(rqstp->rq_xid)];
	hlist_for_each_entry(rp, rh, c_hash) {
		++entries;
		if (nfsd_cache_match(rqstp, csum, rp)) {
			ret = rp;
			break;
		}
	}

	/* tally hash chain length stats */
	if (entries > longest_chain) {
		longest_chain = entries;
		longest_chain_cachesize = num_drc_entries;
	} else if (entries == longest_chain) {
		/* prefer to keep the smallest cachesize possible here */
		longest_chain_cachesize = min(longest_chain_cachesize,
						num_drc_entries);
	}

	return ret;
}

/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we try to grab the oldest expired entry off the LRU list. If
 * a suitable one isn't there, then drop the cache_lock and allocate a
 * new one, then search again in case one got inserted while this thread
 * didn't hold the lock.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct svc_cacherep	*rp, *found;
	__be32			xid = rqstp->rq_xid;
	u32			proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;
	__wsum			csum;
	unsigned long		age;
	int type = rqstp->rq_cachetype;
	int rtn = RC_DOIT;

	rqstp->rq_cacherep = NULL;
	if (type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return rtn;
	}

	csum = nfsd_cache_csum(rqstp);

	/*
	 * Since the common case is a cache miss followed by an insert,
	 * preallocate an entry. First, try to reuse the first entry on the
	 * LRU if it works, then go ahead and prune the LRU list.
	 */
	spin_lock(&cache_lock);
	if (!list_empty(&lru_head)) {
		rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
		if (nfsd_cache_entry_expired(rp) ||
		    num_drc_entries >= max_drc_entries) {
			lru_put_end(rp);
			prune_cache_entries();
			goto search_cache;
		}
	}

	/* No expired ones available, allocate a new one. */
	spin_unlock(&cache_lock);
	rp = nfsd_reply_cache_alloc();
	spin_lock(&cache_lock);
	if (likely(rp)) {
		++num_drc_entries;
		drc_mem_usage += sizeof(*rp);
	}

search_cache:
	found = nfsd_cache_search(rqstp, csum);
	if (found) {
		if (likely(rp))
			nfsd_reply_cache_free_locked(rp);
		rp = found;
		goto found_entry;
	}

	if (!rp) {
		dprintk("nfsd: unable to allocate DRC entry!\n");
		goto out;
	}

	/*
	 * We're keeping the one we just allocated. Are we now over the
	 * limit? Prune one off the tip of the LRU in trade for the one we
	 * just allocated if so.
	 */
	if (num_drc_entries >= max_drc_entries)
		nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
						struct svc_cacherep, c_lru));

	nfsdstats.rcmisses++;
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
	rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_len = rqstp->rq_arg.len;
	rp->c_csum = csum;

	hash_refile(rp);
	lru_put_end(rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
 out:
	spin_unlock(&cache_lock);
	return rtn;

found_entry:
	nfsdstats.rchits++;
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	lru_put_end(rp);

	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!rqstp->rq_secure && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		nfsd_reply_cache_free_locked(rp);
	}

	goto out;
}

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep *rp = rqstp->rq_cacherep;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	int		len;
	size_t		bufsize = 0;

	if (!rp)
		return;

	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(rp);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		bufsize = len << 2;
		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(rp);
			return;
		}
		cachv->iov_len = bufsize;
		memcpy(cachv->iov_base, statp, bufsize);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(rp);
		return;
	}
	spin_lock(&cache_lock);
	drc_mem_usage += bufsize;
	lru_put_end(rp);
	rp->c_secure = rqstp->rq_secure;
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&cache_lock);
	return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%Zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
	spin_lock(&cache_lock);
	seq_printf(m, "max entries:           %u\n", max_drc_entries);
	seq_printf(m, "num entries:           %u\n", num_drc_entries);
	seq_printf(m, "hash buckets:          %u\n", HASHSIZE);
	seq_printf(m, "mem usage:             %u\n", drc_mem_usage);
	seq_printf(m, "cache hits:            %u\n", nfsdstats.rchits);
	seq_printf(m, "cache misses:          %u\n", nfsdstats.rcmisses);
	seq_printf(m, "not cached:            %u\n", nfsdstats.rcnocache);
	seq_printf(m, "payload misses:        %u\n", payload_misses);
	seq_printf(m, "longest chain len:     %u\n", longest_chain);
	seq_printf(m, "cachesize at longest:  %u\n", longest_chain_cachesize);
	spin_unlock(&cache_lock);
	return 0;
}

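/* seq_file open routine for the reply cache stats file */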
int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, nfsd_reply_cache_stats_show, NULL);
}