/*
 * Cleancache frontend
 *
 * This code provides the generic "frontend" layer to call a matching
 * "backend" driver implementation of cleancache.  See
 * Documentation/vm/cleancache.txt for more information.
 *
 * Copyright (C) 2009-2010 Oracle Corp. All rights reserved.
 * Author: Dan Magenheimer
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/exportfs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/cleancache.h>

/*
 * This global enablement flag may be read thousands of times per second
 * by cleancache_get/put/invalidate even on systems where cleancache_ops
 * is not claimed (e.g. cleancache is configured on but remains
 * disabled), so it is preferred to the slower alternative: a function
 * call that checks a non-global.
 */
int cleancache_enabled __read_mostly;
EXPORT_SYMBOL(cleancache_enabled);
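
/*
 * For illustration: the fast path lives in inline wrappers in
 * include/linux/cleancache.h; a minimal sketch (not verbatim) of the
 * "get" wrapper shows how this flag short-circuits the call:
 *
 *	static inline int cleancache_get_page(struct page *page)
 *	{
 *		int ret = -1;
 *
 *		if (cleancache_enabled &&
 *		    page->mapping->host->i_sb->cleancache_poolid >= 0)
 *			ret = __cleancache_get_page(page);
 *		return ret;
 *	}
 */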

/*
 * cleancache_ops is set by cleancache_register_ops to contain the pointers
 * to the cleancache "backend" implementation functions.
 */
static struct cleancache_ops *cleancache_ops __read_mostly;

/*
 * Counters available via /sys/kernel/debug/cleancache (if debugfs is
 * properly configured).  These are for information only, so they are not
 * protected against increment races.
 */
static u64 cleancache_succ_gets;
static u64 cleancache_failed_gets;
static u64 cleancache_puts;
static u64 cleancache_invalidates;

/*
 * When no backend is registered, all calls to init_fs and init_shared_fs
 * are recorded and fake poolids (FAKE_FS_POOLID_OFFSET or
 * FAKE_SHARED_FS_POOLID_OFFSET, plus the offset into the respective array
 * [shared_|]fs_poolid_map) are given to the respective super block
 * (sb->cleancache_poolid) and no tmem_pools are created. When a backend
 * registers with cleancache, the previous calls to init_fs and
 * init_shared_fs are replayed to create tmem_pools and set the respective
 * poolids. While no backend is registered, all "puts", "gets" and
 * "flushes" are ignored or failed.
 */
#define MAX_INITIALIZABLE_FS 32
#define FAKE_FS_POOLID_OFFSET 1000
#define FAKE_SHARED_FS_POOLID_OFFSET 2000

#define FS_NO_BACKEND (-1)
#define FS_UNKNOWN (-2)
static int fs_poolid_map[MAX_INITIALIZABLE_FS];
static int shared_fs_poolid_map[MAX_INITIALIZABLE_FS];
static char *uuids[MAX_INITIALIZABLE_FS];
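/*
 * Example (illustrative values): the first non-shared filesystem to
 * mount takes slot 0, so sb->cleancache_poolid becomes
 * FAKE_FS_POOLID_OFFSET + 0 = 1000.  If no backend is loaded yet,
 * fs_poolid_map[0] holds FS_NO_BACKEND until a backend registers and
 * fills it with the real pool id returned by its init_fs().
 */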
/*
 * Mutex for the [shared_|]fs_poolid_map to guard against multiple threads
 * invoking umount (and ending up in __cleancache_invalidate_fs) and also
 * multiple threads calling mount (and ending up in
 * __cleancache_init_[shared|]fs).
 */
static DEFINE_MUTEX(poolid_mutex);
/*
 * When cleancache_ops is NULL (the default), all calls to the cleancache
 * functions, except __cleancache_invalidate_fs and
 * __cleancache_init_[shared|]fs, are guarded by an "if (!cleancache_ops)
 * return".  This means multiple threads (from different filesystems) will
 * be checking cleancache_ops.  Using a plain pointer instead of an
 * atomic_t, or a pointer guarded by a spinlock, is OK: a window between
 * the backend being initialized (and cleancache_ops becoming non-NULL)
 * and the filesystems actually starting to call the backend is harmless.
 * The inverse (when unloading) is obviously not good - but this shim does
 * not do that (yet).
 */

/*
 * The backends and filesystems all work asynchronously, because the
 * backends can be built as modules.
 * The usual sequence of events is:
 *	a) mount /	-> __cleancache_init_fs is called.  We set the
 *		[shared_|]fs_poolid_map and uuids for it.
 *
 *	b) user does I/Os -> we call the rest of the __cleancache_*
 *		functions, which return immediately as cleancache_ops
 *		is NULL.
 *
 *	c) modprobe zcache -> cleancache_register_ops.  We init the backend
 *		and set cleancache_ops to non-NULL, and for any fs_poolid_map
 *		(which is set by __cleancache_init_fs) we initialize the
 *		poolid.
 *
 *	d) user does I/Os -> now that cleancache_ops is non-NULL, all the
 *		__cleancache_* functions can call the backend.  They all
 *		check that fs_poolid_map is valid and if so invoke the
 *		backend.
 *
 *	e) umount /	-> __cleancache_invalidate_fs, the fs_poolid_map is
 *		reset (fs_poolid_map is the second check made by the
 *		__cleancache_* ops before calling the backend).
 *
 * The sequence of events could also be c), followed by a), d), and e);
 * then the replay in c) has nothing to do.  There is also the chance of
 * c), with one thread then doing a) + d) and another doing e).  For that
 * case we depend on the filesystem calling __cleancache_invalidate_fs in
 * the proper sequence (so that it handles all I/Os before it invalidates
 * the fs, which is the last part of the unmounting process).
 *
 * Note: the astute reader will notice that there is no "rmmod zcache"
 * case.  That is because the functionality for it is not yet implemented
 * and, when done, will require some extra locking not yet devised.
 */
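
/*
 * Lifecycle of a fs_poolid_map slot, derived from the code below
 * (illustrative summary):
 *
 *	init_cleancache():		FS_UNKNOWN (slot free)
 *	__cleancache_init_fs(),
 *	  no backend yet:		FS_NO_BACKEND (sb gets a fake poolid)
 *	cleancache_register_ops():	real pool id from ops->init_fs()
 *	__cleancache_invalidate_fs():	FS_UNKNOWN again (slot reusable)
 */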

/*
 * Register operations for cleancache, returning the previous ops and
 * thus allowing detection of multiple backends and possible nesting.
 */
struct cleancache_ops *cleancache_register_ops(struct cleancache_ops *ops)
{
	struct cleancache_ops *old = cleancache_ops;
	int i;

	mutex_lock(&poolid_mutex);
	for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
		if (fs_poolid_map[i] == FS_NO_BACKEND)
			fs_poolid_map[i] = ops->init_fs(PAGE_SIZE);
		if (shared_fs_poolid_map[i] == FS_NO_BACKEND)
			shared_fs_poolid_map[i] =
				ops->init_shared_fs(uuids[i], PAGE_SIZE);
	}
	/*
	 * We MUST set cleancache_ops _after_ we have called the backend's
	 * init_fs and init_shared_fs functions.  Otherwise the compiler
	 * might reorder the store to cleancache_ops in this function.
	 */
	barrier();
	cleancache_ops = ops;
	mutex_unlock(&poolid_mutex);
	return old;
}
EXPORT_SYMBOL(cleancache_register_ops);
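
/*
 * Usage sketch for a hypothetical backend (the "my_*" names are
 * illustrative, not from any real driver); the ops fields match those
 * used in this file:
 *
 *	static struct cleancache_ops my_cleancache_ops = {
 *		.init_fs	  = my_init_fs,
 *		.init_shared_fs	  = my_init_shared_fs,
 *		.get_page	  = my_get_page,
 *		.put_page	  = my_put_page,
 *		.invalidate_page  = my_invalidate_page,
 *		.invalidate_inode = my_invalidate_inode,
 *		.invalidate_fs	  = my_invalidate_fs,
 *	};
 *
 *	old_ops = cleancache_register_ops(&my_cleancache_ops);
 */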

/* Called by a cleancache-enabled filesystem at time of mount */
void __cleancache_init_fs(struct super_block *sb)
{
	int i;

	mutex_lock(&poolid_mutex);
	for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
		if (fs_poolid_map[i] == FS_UNKNOWN) {
			sb->cleancache_poolid = i + FAKE_FS_POOLID_OFFSET;
			if (cleancache_ops)
				fs_poolid_map[i] =
					cleancache_ops->init_fs(PAGE_SIZE);
			else
				fs_poolid_map[i] = FS_NO_BACKEND;
			break;
		}
	}
	mutex_unlock(&poolid_mutex);
}
EXPORT_SYMBOL(__cleancache_init_fs);

/* Called by a cleancache-enabled clustered filesystem at time of mount */
void __cleancache_init_shared_fs(char *uuid, struct super_block *sb)
{
	int i;

	mutex_lock(&poolid_mutex);
	for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
		if (shared_fs_poolid_map[i] == FS_UNKNOWN) {
			sb->cleancache_poolid = i + FAKE_SHARED_FS_POOLID_OFFSET;
			uuids[i] = uuid;
			if (cleancache_ops)
				shared_fs_poolid_map[i] =
					cleancache_ops->init_shared_fs(uuid,
								PAGE_SIZE);
			else
				shared_fs_poolid_map[i] = FS_NO_BACKEND;
			break;
		}
	}
	mutex_unlock(&poolid_mutex);
}
EXPORT_SYMBOL(__cleancache_init_shared_fs);

/*
 * If the filesystem uses exportable filehandles, use the filehandle as
 * the key, else use the inode number.
 */
static int cleancache_get_key(struct inode *inode,
			      struct cleancache_filekey *key)
{
	int (*fhfn)(struct inode *, __u32 *fh, int *, struct inode *);
	int len = 0, maxlen = CLEANCACHE_KEY_MAX;
	struct super_block *sb = inode->i_sb;

	key->u.ino = inode->i_ino;
	if (sb->s_export_op != NULL) {
		fhfn = sb->s_export_op->encode_fh;
		if (fhfn) {
			len = (*fhfn)(inode, &key->u.fh[0], &maxlen, NULL);
			if (len <= FILEID_ROOT || len == FILEID_INVALID)
				return -1;
			if (maxlen > CLEANCACHE_KEY_MAX)
				return -1;
		}
	}
	return 0;
}
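
/*
 * For example, on a filesystem with no export_operations the key for
 * inode number 12345 is simply { .u.ino = 12345 }; an exportable
 * filesystem instead encodes a filehandle into key->u.fh, which stays
 * stable even if the inode number is later reused.
 */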

/*
 * Returns a pool_id that is associated with a given fake poolid.
 */
static int get_poolid_from_fake(int fake_pool_id)
{
	if (fake_pool_id >= FAKE_SHARED_FS_POOLID_OFFSET)
		return shared_fs_poolid_map[fake_pool_id -
			FAKE_SHARED_FS_POOLID_OFFSET];
	else if (fake_pool_id >= FAKE_FS_POOLID_OFFSET)
		return fs_poolid_map[fake_pool_id - FAKE_FS_POOLID_OFFSET];
	return FS_NO_BACKEND;
}
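
/*
 * E.g. (illustrative): a fake_pool_id of 2003 decodes to
 * shared_fs_poolid_map[3] and 1007 decodes to fs_poolid_map[7];
 * anything below FAKE_FS_POOLID_OFFSET yields FS_NO_BACKEND.
 */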

/*
 * "Get" data from cleancache associated with the poolid/inode/index
 * that were specified when the data was put to cleancache and, if
 * successful, use it to fill the specified page with data and return 0.
 * If the get fails, the page frame is unchanged and -1 is returned.
 * The page must be locked by the caller.
 *
 * The function has two checks before any action is taken - whether
 * a backend is registered and whether the sb->cleancache_poolid
 * is correct.
 */
int __cleancache_get_page(struct page *page)
{
	int ret = -1;
	int pool_id;
	int fake_pool_id;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (!cleancache_ops) {
		cleancache_failed_gets++;
		goto out;
	}

	VM_BUG_ON(!PageLocked(page));
	fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
	if (fake_pool_id < 0)
		goto out;
	pool_id = get_poolid_from_fake(fake_pool_id);

	if (cleancache_get_key(page->mapping->host, &key) < 0)
		goto out;

	if (pool_id >= 0)
		ret = cleancache_ops->get_page(pool_id,
				key, page->index, page);
	if (ret == 0)
		cleancache_succ_gets++;
	else
		cleancache_failed_gets++;
out:
	return ret;
}
EXPORT_SYMBOL(__cleancache_get_page);
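
/*
 * For illustration, a read-path caller (a simplified sketch; see the
 * users of the cleancache_get_page() wrapper, e.g. fs/mpage.c) treats a
 * successful get as a read completed without disk I/O:
 *
 *	if (cleancache_get_page(page) == 0) {
 *		SetPageUptodate(page);
 *		// page was filled from cleancache; skip the disk read
 *	}
 */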

/*
 * "Put" data from a page to cleancache and associate it with the
 * (previously-obtained per-filesystem) poolid and the page's inode
 * and page index.  The page must be locked.  Note that a put_page
 * always "succeeds", though a subsequent get_page may succeed or fail.
 *
 * The function has two checks before any action is taken - whether
 * a backend is registered and whether the sb->cleancache_poolid
 * is correct.
 */
void __cleancache_put_page(struct page *page)
{
	int pool_id;
	int fake_pool_id;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (!cleancache_ops) {
		cleancache_puts++;
		return;
	}

	VM_BUG_ON(!PageLocked(page));
	fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
	if (fake_pool_id < 0)
		return;

	pool_id = get_poolid_from_fake(fake_pool_id);

	if (pool_id >= 0 &&
		cleancache_get_key(page->mapping->host, &key) >= 0) {
		cleancache_ops->put_page(pool_id, key, page->index, page);
		cleancache_puts++;
	}
}
EXPORT_SYMBOL(__cleancache_put_page);
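
/*
 * And on the eviction side (simplified sketch of the hook in
 * __delete_from_page_cache() in mm/filemap.c): clean, up-to-date pages
 * are put, anything else is invalidated so no stale data survives:
 *
 *	if (PageUptodate(page) && PageMappedToDisk(page))
 *		cleancache_put_page(page);
 *	else
 *		cleancache_invalidate_page(mapping, page);
 */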

/*
 * Invalidate any data from cleancache associated with the poolid and the
 * page's inode and page index so that a subsequent "get" will fail.
 *
 * The function has two checks before any action is taken - whether
 * a backend is registered and whether the sb->cleancache_poolid
 * is correct.
 */
void __cleancache_invalidate_page(struct address_space *mapping,
					struct page *page)
{
	/* careful... page->mapping is NULL sometimes when this is called */
	int pool_id;
	int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (!cleancache_ops)
		return;

	if (fake_pool_id >= 0) {
		pool_id = get_poolid_from_fake(fake_pool_id);
		if (pool_id < 0)
			return;

		VM_BUG_ON(!PageLocked(page));
		if (cleancache_get_key(mapping->host, &key) >= 0) {
			cleancache_ops->invalidate_page(pool_id,
					key, page->index);
			cleancache_invalidates++;
		}
	}
}
EXPORT_SYMBOL(__cleancache_invalidate_page);

/*
 * Invalidate all data from cleancache associated with the poolid and the
 * mapping's inode so that all subsequent gets to this poolid/inode
 * will fail.
 *
 * The function has two checks before any action is taken - whether
 * a backend is registered and whether the sb->cleancache_poolid
 * is correct.
 */
void __cleancache_invalidate_inode(struct address_space *mapping)
{
	int pool_id;
	int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (!cleancache_ops)
		return;

	if (fake_pool_id < 0)
		return;

	pool_id = get_poolid_from_fake(fake_pool_id);

	if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
		cleancache_ops->invalidate_inode(pool_id, key);
}
EXPORT_SYMBOL(__cleancache_invalidate_inode);

/*
 * Called by any cleancache-enabled filesystem at time of unmount;
 * note that pool_id is surrendered and may be returned by a subsequent
 * cleancache_init_fs or cleancache_init_shared_fs.
 */
void __cleancache_invalidate_fs(struct super_block *sb)
{
	int index;
	int fake_pool_id = sb->cleancache_poolid;
	int old_poolid = fake_pool_id;

	mutex_lock(&poolid_mutex);
	if (fake_pool_id >= FAKE_SHARED_FS_POOLID_OFFSET) {
		index = fake_pool_id - FAKE_SHARED_FS_POOLID_OFFSET;
		old_poolid = shared_fs_poolid_map[index];
		shared_fs_poolid_map[index] = FS_UNKNOWN;
		uuids[index] = NULL;
	} else if (fake_pool_id >= FAKE_FS_POOLID_OFFSET) {
		index = fake_pool_id - FAKE_FS_POOLID_OFFSET;
		old_poolid = fs_poolid_map[index];
		fs_poolid_map[index] = FS_UNKNOWN;
	}
	sb->cleancache_poolid = -1;
	if (cleancache_ops)
		cleancache_ops->invalidate_fs(old_poolid);
	mutex_unlock(&poolid_mutex);
}
EXPORT_SYMBOL(__cleancache_invalidate_fs);

static int __init init_cleancache(void)
{
	int i;

#ifdef CONFIG_DEBUG_FS
	struct dentry *root = debugfs_create_dir("cleancache", NULL);
	if (root == NULL)
		return -ENXIO;
	debugfs_create_u64("succ_gets", S_IRUGO, root, &cleancache_succ_gets);
	debugfs_create_u64("failed_gets", S_IRUGO,
				root, &cleancache_failed_gets);
	debugfs_create_u64("puts", S_IRUGO, root, &cleancache_puts);
	debugfs_create_u64("invalidates", S_IRUGO,
				root, &cleancache_invalidates);
#endif
	for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
		fs_poolid_map[i] = FS_UNKNOWN;
		shared_fs_poolid_map[i] = FS_UNKNOWN;
	}
	cleancache_enabled = 1;
	return 0;
}
module_init(init_cleancache)