/*
 * linux/fs/mbcache.c
 * (C) 2001-2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
 */

/*
 * Filesystem Meta Information Block Cache (mbcache)
 *
 * The mbcache caches blocks of block devices that need to be located
 * by their device/block number, as well as by other criteria (such
 * as the block's contents).
 *
 * There can only be one cache entry in a cache per device and block number.
 * Additional indexes need not be unique in this sense. The number of
 * additional indexes (=other criteria) can be hardwired at compile time
 * or specified at cache create time.
 *
 * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
 * in the cache. A valid entry is in the main hash tables of the cache,
 * and may also be in the lru list. An invalid entry is not in any hashes
 * or lists.
 *
 * A valid cache entry is only in the lru list if no handles refer to it.
 * Invalid cache entries will be freed when the last handle to the cache
 * entry is released. Entries that cannot be freed immediately are put
 * back on the lru list.
 */
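
/*
 * Summary of the entry states described above (an editorial illustration
 * derived from the rules in this comment, not an exhaustive state machine):
 *
 *	state		in hash tables	on lru list	open handles
 *	valid, unused	yes		yes		none
 *	valid, in use	yes		no		one or more
 *	invalid		no		no		freed when the last
 *							handle is released
 *
 * (As noted above, an invalid entry whose free callback cannot run
 * immediately is put back on the lru list and freed later.)
 */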

#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/hash.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/mbcache.h>


#ifdef MB_CACHE_DEBUG
# define mb_debug(f...) do { \
		printk(KERN_DEBUG f); \
		printk("\n"); \
	} while (0)
#define mb_assert(c) do { if (!(c)) \
		printk(KERN_ERR "assertion " #c " failed\n"); \
	} while(0)
#else
# define mb_debug(f...) do { } while(0)
# define mb_assert(c) do { } while(0)
#endif
#define mb_error(f...) do { \
		printk(KERN_ERR f); \
		printk("\n"); \
	} while(0)

#define MB_CACHE_WRITER ((unsigned short)~0U >> 1)
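
/*
 * Editorial note on the e_used encoding (derived from the code below; the
 * concrete values assume a 16-bit unsigned short, so MB_CACHE_WRITER is
 * 0x7fff): e_used counts the handles holding an entry. Each reader adds 1;
 * an exclusive writer holds 1 + MB_CACHE_WRITER, i.e. 0x8000. Thus
 * "e_used >= MB_CACHE_WRITER" in __mb_cache_entry_find() means a writer is
 * active, and "e_used > 0" in mb_cache_entry_get() means that some handle,
 * reader or writer, is still outstanding.
 */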

static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);

MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(mb_cache_create);
EXPORT_SYMBOL(mb_cache_shrink);
EXPORT_SYMBOL(mb_cache_destroy);
EXPORT_SYMBOL(mb_cache_entry_alloc);
EXPORT_SYMBOL(mb_cache_entry_insert);
EXPORT_SYMBOL(mb_cache_entry_release);
EXPORT_SYMBOL(mb_cache_entry_free);
EXPORT_SYMBOL(mb_cache_entry_get);
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
EXPORT_SYMBOL(mb_cache_entry_find_first);
EXPORT_SYMBOL(mb_cache_entry_find_next);
#endif

struct mb_cache {
	struct list_head		c_cache_list;
	const char			*c_name;
	struct mb_cache_op		c_op;
	atomic_t			c_entry_count;
	int				c_bucket_bits;
#ifndef MB_CACHE_INDEXES_COUNT
	int				c_indexes_count;
#endif
	kmem_cache_t			*c_entry_cache;
	struct list_head		*c_block_hash;
	struct list_head		*c_indexes_hash[0];
};


/*
 * Global data: the list of all mbcaches, the lru list, and a spinlock for
 * accessing cache data structures on SMP machines. The lru list is
 * global across all mbcaches.
 */

static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static DEFINE_SPINLOCK(mb_cache_spinlock);
static struct shrinker *mb_shrinker;

static inline int
mb_cache_indexes(struct mb_cache *cache)
{
#ifdef MB_CACHE_INDEXES_COUNT
	return MB_CACHE_INDEXES_COUNT;
#else
	return cache->c_indexes_count;
#endif
}

/*
 * The callback that the mbcache registers with the kernel (via
 * set_shrinker() below) so that the cache can be shrunk dynamically
 * under memory pressure.
 */

static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask);


static inline int
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
{
	return !list_empty(&ce->e_block_list);
}


static void
__mb_cache_entry_unhash(struct mb_cache_entry *ce)
{
	int n;

	if (__mb_cache_entry_is_hashed(ce)) {
		list_del_init(&ce->e_block_list);
		for (n=0; n<mb_cache_indexes(ce->e_cache); n++)
			list_del(&ce->e_indexes[n].o_list);
	}
}


static void
__mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
{
	struct mb_cache *cache = ce->e_cache;

	mb_assert(!(ce->e_used || ce->e_queued));
	if (cache->c_op.free && cache->c_op.free(ce, gfp_mask)) {
		/* free failed -- put back on the lru list
		   for freeing later. */
		spin_lock(&mb_cache_spinlock);
		list_add(&ce->e_lru_list, &mb_cache_lru_list);
		spin_unlock(&mb_cache_spinlock);
	} else {
		kmem_cache_free(cache->c_entry_cache, ce);
		atomic_dec(&cache->c_entry_count);
	}
}


static void
__mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
{
	/* Wake up all processes queuing for this cache entry. */
	if (ce->e_queued)
		wake_up_all(&mb_cache_queue);
	if (ce->e_used >= MB_CACHE_WRITER)
		ce->e_used -= MB_CACHE_WRITER;
	ce->e_used--;
	if (!(ce->e_used || ce->e_queued)) {
		if (!__mb_cache_entry_is_hashed(ce))
			goto forget;
		mb_assert(list_empty(&ce->e_lru_list));
		list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
	}
	spin_unlock(&mb_cache_spinlock);
	return;
forget:
	spin_unlock(&mb_cache_spinlock);
	__mb_cache_entry_forget(ce, GFP_KERNEL);
}


/*
 * mb_cache_shrink_fn()  memory pressure callback
 *
 * This function is called by the kernel memory management when memory
 * gets low.
 *
 * @nr_to_scan: Number of objects to scan
 * @gfp_mask: (ignored)
 *
 * Returns the number of objects which are present in the cache.
 */
static int
mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;
	int count = 0;

	spin_lock(&mb_cache_spinlock);
	list_for_each(l, &mb_cache_list) {
		struct mb_cache *cache =
			list_entry(l, struct mb_cache, c_cache_list);
		mb_debug("cache %s (%d)", cache->c_name,
			 atomic_read(&cache->c_entry_count));
		count += atomic_read(&cache->c_entry_count);
	}
	mb_debug("trying to free %d entries", nr_to_scan);
	if (nr_to_scan == 0) {
		spin_unlock(&mb_cache_spinlock);
		goto out;
	}
	while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
		struct mb_cache_entry *ce =
			list_entry(mb_cache_lru_list.next,
				   struct mb_cache_entry, e_lru_list);
		list_move_tail(&ce->e_lru_list, &free_list);
		__mb_cache_entry_unhash(ce);
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), gfp_mask);
	}
out:
	return (count / 100) * sysctl_vfs_cache_pressure;
}
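
/*
 * Worked example of the return value above (editorial; assumes the default
 * sysctl_vfs_cache_pressure of 100): with 5000 entries cached across all
 * mbcaches, (5000 / 100) * 100 = 5000 objects are reported to the VM.
 * Raising vfs_cache_pressure to 200 would report 10000, making the cache
 * look twice as expendable under memory pressure.
 */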


/*
 * mb_cache_create()  create a new cache
 *
 * All entries in one cache are equal size. Cache entries may be from
 * multiple devices. The new cache is added to the global list of caches
 * that the shrinker callback scans. Returns NULL if no more memory was
 * available.
 *
 * @name: name of the cache (informal)
 * @cache_op: contains the callback called when freeing a cache entry
 * @entry_size: The size of a cache entry, including
 *              struct mb_cache_entry
 * @indexes_count: number of additional indexes in the cache. Must equal
 *                 MB_CACHE_INDEXES_COUNT if the number of indexes is
 *                 hardwired.
 * @bucket_bits: log2(number of hash buckets)
 */
struct mb_cache *
mb_cache_create(const char *name, struct mb_cache_op *cache_op,
		size_t entry_size, int indexes_count, int bucket_bits)
{
	int m=0, n, bucket_count = 1 << bucket_bits;
	struct mb_cache *cache = NULL;

	if (entry_size < sizeof(struct mb_cache_entry) +
	    indexes_count * sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]))
		return NULL;

	cache = kmalloc(sizeof(struct mb_cache) +
			indexes_count * sizeof(struct list_head), GFP_KERNEL);
	if (!cache)
		goto fail;
	cache->c_name = name;
	cache->c_op.free = NULL;
	if (cache_op)
		cache->c_op.free = cache_op->free;
	atomic_set(&cache->c_entry_count, 0);
	cache->c_bucket_bits = bucket_bits;
#ifdef MB_CACHE_INDEXES_COUNT
	mb_assert(indexes_count == MB_CACHE_INDEXES_COUNT);
#else
	cache->c_indexes_count = indexes_count;
#endif
	cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
				      GFP_KERNEL);
	if (!cache->c_block_hash)
		goto fail;
	for (n=0; n<bucket_count; n++)
		INIT_LIST_HEAD(&cache->c_block_hash[n]);
	for (m=0; m<indexes_count; m++) {
		cache->c_indexes_hash[m] = kmalloc(bucket_count *
						   sizeof(struct list_head),
						   GFP_KERNEL);
		if (!cache->c_indexes_hash[m])
			goto fail;
		for (n=0; n<bucket_count; n++)
			INIT_LIST_HEAD(&cache->c_indexes_hash[m][n]);
	}
	cache->c_entry_cache = kmem_cache_create(name, entry_size, 0,
		SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL, NULL);
	if (!cache->c_entry_cache)
		goto fail;

	spin_lock(&mb_cache_spinlock);
	list_add(&cache->c_cache_list, &mb_cache_list);
	spin_unlock(&mb_cache_spinlock);
	return cache;

fail:
	if (cache) {
		while (--m >= 0)
			kfree(cache->c_indexes_hash[m]);
		kfree(cache->c_block_hash);
		kfree(cache);
	}
	return NULL;
}
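
/*
 * Sketch of a typical caller (hypothetical; the name "demo_xattr" and the
 * parameter choices are illustrative only). A filesystem's extended
 * attribute code might set up a cache with one additional index (a hash
 * of the block contents) and 2^6 hash buckets like this:
 *
 *	static struct mb_cache *demo_xattr_cache;
 *
 *	demo_xattr_cache = mb_cache_create("demo_xattr", NULL,
 *		sizeof(struct mb_cache_entry) +
 *		sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]),
 *		1, 6);
 *	if (!demo_xattr_cache)
 *		return -ENOMEM;
 */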


/*
 * mb_cache_shrink()
 *
 * Removes all cache entries of a device from the cache. All cache entries
 * currently in use cannot be freed, and thus remain in the cache. All others
 * are freed.
 *
 * @bdev: which device's cache entries to shrink
 */
void
mb_cache_shrink(struct block_device *bdev)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_bdev == bdev) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}
}


/*
 * mb_cache_destroy()
 *
 * Shrinks the cache to its minimum possible size (hopefully 0 entries),
 * and then destroys it. If this was the last mbcache, un-registers the
 * mbcache from kernel memory management.
 */
void
mb_cache_destroy(struct mb_cache *cache)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;
	int n;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_cache == cache) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	list_del(&cache->c_cache_list);
	spin_unlock(&mb_cache_spinlock);

	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}

	if (atomic_read(&cache->c_entry_count) > 0) {
		mb_error("cache %s: %d orphaned entries",
			 cache->c_name,
			 atomic_read(&cache->c_entry_count));
	}

	kmem_cache_destroy(cache->c_entry_cache);

	for (n=0; n < mb_cache_indexes(cache); n++)
		kfree(cache->c_indexes_hash[n]);
	kfree(cache->c_block_hash);
	kfree(cache);
}


/*
 * mb_cache_entry_alloc()
 *
 * Allocates a new cache entry. The new entry will not be valid initially,
 * and thus cannot be looked up yet. It should be filled with data, and
 * then inserted into the cache using mb_cache_entry_insert(). Returns NULL
 * if no more memory was available.
 */
struct mb_cache_entry *
mb_cache_entry_alloc(struct mb_cache *cache)
{
	struct mb_cache_entry *ce;

	ce = kmem_cache_alloc(cache->c_entry_cache, GFP_KERNEL);
	if (ce) {
		/* Count the entry only once the allocation has succeeded;
		   incrementing before a failed allocation would leave
		   c_entry_count permanently too high. */
		atomic_inc(&cache->c_entry_count);
		INIT_LIST_HEAD(&ce->e_lru_list);
		INIT_LIST_HEAD(&ce->e_block_list);
		ce->e_cache = cache;
		ce->e_used = 1 + MB_CACHE_WRITER;
		ce->e_queued = 0;
	}
	return ce;
}


/*
 * mb_cache_entry_insert()
 *
 * Inserts an entry that was allocated using mb_cache_entry_alloc() into
 * the cache. After this, the cache entry can be looked up, but is not yet
 * in the lru list as the caller still holds a handle to it. Returns 0 on
 * success, or -EBUSY if a cache entry for that device + block number exists
 * already (this may happen after a failed lookup, but when another process
 * has inserted the same cache entry in the meantime).
 *
 * @bdev: device the cache entry belongs to
 * @block: block number
 * @keys: array of additional keys. There must be indexes_count entries
 *        in the array (as specified when creating the cache).
 */
int
mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
		      sector_t block, unsigned int keys[])
{
	struct mb_cache *cache = ce->e_cache;
	unsigned int bucket;
	struct list_head *l;
	int error = -EBUSY, n;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each_prev(l, &cache->c_block_hash[bucket]) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_block_list);
		if (ce->e_bdev == bdev && ce->e_block == block)
			goto out;
	}
	__mb_cache_entry_unhash(ce);
	ce->e_bdev = bdev;
	ce->e_block = block;
	list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
	for (n=0; n<mb_cache_indexes(cache); n++) {
		ce->e_indexes[n].o_key = keys[n];
		bucket = hash_long(keys[n], cache->c_bucket_bits);
		list_add(&ce->e_indexes[n].o_list,
			 &cache->c_indexes_hash[n][bucket]);
	}
	error = 0;
out:
	spin_unlock(&mb_cache_spinlock);
	return error;
}
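
/*
 * Sketch of the allocate/insert sequence (hypothetical caller; "keys[0]"
 * stands for whatever content hash the filesystem computes):
 *
 *	struct mb_cache_entry *ce = mb_cache_entry_alloc(cache);
 *	if (ce) {
 *		if (mb_cache_entry_insert(ce, bdev, block, keys))
 *			mb_cache_entry_free(ce);    (-EBUSY: duplicate, drop it)
 *		else
 *			mb_cache_entry_release(ce); (entry goes on the lru list)
 *	}
 */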


/*
 * mb_cache_entry_release()
 *
 * Release a handle to a cache entry. When the last handle to a cache entry
 * is released it is either freed (if it is invalid) or otherwise inserted
 * into the lru list.
 */
void
mb_cache_entry_release(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	__mb_cache_entry_release_unlock(ce);
}


/*
 * mb_cache_entry_free()
 *
 * Unhashes the entry, making it invalid, and then releases the handle to
 * it. The entry is freed once no handles remain.
 */
void
mb_cache_entry_free(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	mb_assert(list_empty(&ce->e_lru_list));
	__mb_cache_entry_unhash(ce);
	__mb_cache_entry_release_unlock(ce);
}


/*
 * mb_cache_entry_get()
 *
 * Get a cache entry by device / block number. (There can only be one entry
 * in the cache per device and block.) Returns NULL if no such cache entry
 * exists. The returned cache entry is locked for exclusive access ("single
 * writer").
 */
struct mb_cache_entry *
mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
		   sector_t block)
{
	unsigned int bucket;
	struct list_head *l;
	struct mb_cache_entry *ce;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each(l, &cache->c_block_hash[bucket]) {
		ce = list_entry(l, struct mb_cache_entry, e_block_list);
		if (ce->e_bdev == bdev && ce->e_block == block) {
			DEFINE_WAIT(wait);

			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);

			while (ce->e_used > 0) {
				ce->e_queued++;
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				schedule();
				spin_lock(&mb_cache_spinlock);
				ce->e_queued--;
			}
			finish_wait(&mb_cache_queue, &wait);
			ce->e_used += 1 + MB_CACHE_WRITER;

			if (!__mb_cache_entry_is_hashed(ce)) {
				__mb_cache_entry_release_unlock(ce);
				return NULL;
			}
			goto cleanup;
		}
	}
	ce = NULL;

cleanup:
	spin_unlock(&mb_cache_spinlock);
	return ce;
}
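
/*
 * Sketch of exclusive access via mb_cache_entry_get() (hypothetical
 * caller). This is the natural pattern when a block is being reused or
 * freed and any cached entry for it must be invalidated:
 *
 *	ce = mb_cache_entry_get(cache, bdev, block);
 *	if (ce)
 *		mb_cache_entry_free(ce);    (unhash and drop the entry)
 */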

#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)

static struct mb_cache_entry *
__mb_cache_entry_find(struct list_head *l, struct list_head *head,
		      int index, struct block_device *bdev, unsigned int key)
{
	while (l != head) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry,
				   e_indexes[index].o_list);
		if (ce->e_bdev == bdev && ce->e_indexes[index].o_key == key) {
			DEFINE_WAIT(wait);

			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);

			/* Incrementing before holding the lock gives readers
			   priority over writers. */
			ce->e_used++;
			while (ce->e_used >= MB_CACHE_WRITER) {
				ce->e_queued++;
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				schedule();
				spin_lock(&mb_cache_spinlock);
				ce->e_queued--;
			}
			finish_wait(&mb_cache_queue, &wait);

			if (!__mb_cache_entry_is_hashed(ce)) {
				__mb_cache_entry_release_unlock(ce);
				spin_lock(&mb_cache_spinlock);
				return ERR_PTR(-EAGAIN);
			}
			return ce;
		}
		l = l->next;
	}
	return NULL;
}


/*
 * mb_cache_entry_find_first()
 *
 * Find the first cache entry on a given device with a certain key in
 * an additional index. Additional matches can be found with
 * mb_cache_entry_find_next(). Returns NULL if no match was found. The
 * returned cache entry is locked for shared access ("multiple readers").
 *
 * @cache: the cache to search
 * @index: the number of the additional index to search (0<=index<indexes_count)
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_first(struct mb_cache *cache, int index,
			  struct block_device *bdev, unsigned int key)
{
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;

	mb_assert(index < mb_cache_indexes(cache));
	spin_lock(&mb_cache_spinlock);
	l = cache->c_indexes_hash[index][bucket].next;
	ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
				   index, bdev, key);
	spin_unlock(&mb_cache_spinlock);
	return ce;
}


/*
 * mb_cache_entry_find_next()
 *
 * Find the next cache entry on a given device with a certain key in an
 * additional index. Returns NULL if no match could be found. The previous
 * entry is automatically released, so that mb_cache_entry_find_next() can
 * be called like this:
 *
 * entry = mb_cache_entry_find_first();
 * while (entry) {
 * 	...
 *	entry = mb_cache_entry_find_next(entry, ...);
 * }
 *
 * @prev: The previous match
 * @index: the number of the additional index to search (0<=index<indexes_count)
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_next(struct mb_cache_entry *prev, int index,
			 struct block_device *bdev, unsigned int key)
{
	struct mb_cache *cache = prev->e_cache;
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;

	mb_assert(index < mb_cache_indexes(cache));
	spin_lock(&mb_cache_spinlock);
	l = prev->e_indexes[index].o_list.next;
	ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
				   index, bdev, key);
	__mb_cache_entry_release_unlock(prev);
	return ce;
}
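
/*
 * Editorial note on the loop shown in the comment above: only an entry
 * passed back into mb_cache_entry_find_next() is released automatically.
 * A caller that breaks out of the loop early still holds a handle on the
 * current entry and must drop it itself with mb_cache_entry_release()
 * (or mb_cache_entry_free() to also invalidate it).
 */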

#endif	/* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */

static int __init init_mbcache(void)
{
	mb_shrinker = set_shrinker(DEFAULT_SEEKS, mb_cache_shrink_fn);
	return 0;
}

static void __exit exit_mbcache(void)
{
	remove_shrinker(mb_shrinker);
}

module_init(init_mbcache)
module_exit(exit_mbcache)
