/*
 * Xen implementation for transcendent memory (tmem)
 *
 * Copyright (C) 2009-2011 Oracle Corp.  All rights reserved.
 * Author: Dan Magenheimer
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/cleancache.h>

/* temporary ifdef until include/linux/frontswap.h is upstream */
#ifdef CONFIG_FRONTSWAP
#include <linux/frontswap.h>
#endif

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
#include <asm/xen/hypervisor.h>

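/*
 * Command numbers for HYPERVISOR_tmem_op(); these are assumed to mirror
 * the tmem sub-op numbering in Xen's public ABI (struct tmem_op comes in
 * via xen/interface/xen.h above).  Only a subset is used by this driver.
 */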
#define TMEM_CONTROL               0
#define TMEM_NEW_POOL              1
#define TMEM_DESTROY_POOL          2
#define TMEM_NEW_PAGE              3
#define TMEM_PUT_PAGE              4
#define TMEM_GET_PAGE              5
#define TMEM_FLUSH_PAGE            6
#define TMEM_FLUSH_OBJECT          7
#define TMEM_READ                  8
#define TMEM_WRITE                 9
#define TMEM_XCHG                 10

/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST          1
#define TMEM_POOL_SHARED           2
#define TMEM_POOL_PAGESIZE_SHIFT   4
#define TMEM_VERSION_SHIFT        24
#define TMEM_SPEC_VERSION          1
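/*
 * Layout of the TMEM_NEW_POOL flags word built in xen_tmem_new_pool()
 * below: bits 0-1 carry the persist/shared flags, bits 4+ encode
 * log2(pagesize) relative to 4K, and bits 24+ carry the spec version.
 * For a persistent pool of 4K pages: flags = 1 | (0 << 4) | (1 << 24).
 */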

struct tmem_pool_uuid {
	u64 uuid_lo;
	u64 uuid_hi;
};

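/*
 * A tmem object id is 192 opaque bits.  In this driver it holds either a
 * cleancache_filekey (xen_tmem_init() has a BUG_ON pinning the two types
 * to the same size) or a swizzled frontswap type/offset built by oswiz().
 */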
struct tmem_oid {
	u64 oid[3];
};

#define TMEM_POOL_PRIVATE_UUID	{ 0, 0 }

/* xen tmem foundation ops/hypercalls */

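/*
 * All single-page tmem operations funnel through here: the arguments are
 * marshalled into a struct tmem_op and handed over in a single
 * HYPERVISOR_tmem_op hypercall.  The gmfn argument travels through a
 * guest handle and is interpreted by the hypervisor as a guest frame
 * number, so callers pass an mfn on PV and a pfn on HVM (see below).
 */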
static inline int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid,
	u32 index, unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len)
{
	struct tmem_op op;
	int rc = 0;

	op.cmd = tmem_cmd;
	op.pool_id = tmem_pool;
	op.u.gen.oid[0] = oid.oid[0];
	op.u.gen.oid[1] = oid.oid[1];
	op.u.gen.oid[2] = oid.oid[2];
	op.u.gen.index = index;
	op.u.gen.tmem_offset = tmem_offset;
	op.u.gen.pfn_offset = pfn_offset;
	op.u.gen.len = len;
	set_xen_guest_handle(op.u.gen.gmfn, (void *)gmfn);
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}

static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
				u32 flags, unsigned long pagesize)
{
	struct tmem_op op;
	int rc = 0, pageshift;

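	/*
	 * Compute log2(pagesize) by repeated shifting; this assumes
	 * pagesize is a nonzero power of two, otherwise the loop never
	 * terminates.  The result is encoded relative to the 4K (2^12)
	 * base page size.
	 */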
	for (pageshift = 0; pagesize != 1; pageshift++)
		pagesize >>= 1;
	flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT;
	flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT;
	op.cmd = TMEM_NEW_POOL;
	op.u.new.uuid[0] = uuid.uuid_lo;
	op.u.new.uuid[1] = uuid.uuid_hi;
	op.u.new.flags = flags;
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}

/* xen generic tmem ops */

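/*
 * The hypervisor addresses guest memory by machine frame number for PV
 * guests, so the two helpers below translate with pfn_to_mfn() there;
 * auto-translated (HVM) guests pass the pfn through unchanged.
 */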
static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
			     u32 index, unsigned long pfn)
{
	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;

	return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
		gmfn, 0, 0, 0);
}

static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
			     u32 index, unsigned long pfn)
{
	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;

	return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
		gmfn, 0, 0, 0);
}

static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
{
	return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index,
		0, 0, 0, 0);
}

static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
{
	return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
}

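/*
 * tmem is off by default; booting the guest with "tmem" on the kernel
 * command line enables it.  "nocleancache" and "nofrontswap" (below) can
 * then opt the individual clients back out.
 */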
bool __read_mostly tmem_enabled = false;

static int __init enable_tmem(char *s)
{
	tmem_enabled = true;
	return 1;
}
__setup("tmem", enable_tmem);

#ifdef CONFIG_CLEANCACHE
static int xen_tmem_destroy_pool(u32 pool_id)
{
	struct tmem_oid oid = { { 0 } };

	return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
}

/* cleancache ops */

static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
				     pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	unsigned long pfn = page_to_pfn(page);

	if (pool < 0)
		return;
	if (ind != index)
		return;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	(void)xen_tmem_put_page((u32)pool, oid, ind, pfn);
}

static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
				    pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	unsigned long pfn = page_to_pfn(page);
	int ret;

	/* translate return values to linux semantics */
	if (pool < 0)
		return -1;
	if (ind != index)
		return -1;
	ret = xen_tmem_get_page((u32)pool, oid, ind, pfn);
	if (ret == 1)
		return 0;
	else
		return -1;
}

static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key,
				       pgoff_t index)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	if (ind != index)
		return;
	(void)xen_tmem_flush_page((u32)pool, oid, ind);
}

static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key)
{
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	(void)xen_tmem_flush_object((u32)pool, oid);
}

static void tmem_cleancache_flush_fs(int pool)
{
	if (pool < 0)
		return;
	(void)xen_tmem_destroy_pool((u32)pool);
}

static int tmem_cleancache_init_fs(size_t pagesize)
{
	struct tmem_pool_uuid uuid_private = TMEM_POOL_PRIVATE_UUID;

	return xen_tmem_new_pool(uuid_private, 0, pagesize);
}

static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
{
	struct tmem_pool_uuid shared_uuid;

	shared_uuid.uuid_lo = *(u64 *)uuid;
	shared_uuid.uuid_hi = *(u64 *)(&uuid[8]);
	return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
}

static bool __initdata use_cleancache = true;

static int __init no_cleancache(char *s)
{
	use_cleancache = false;
	return 1;
}
__setup("nocleancache", no_cleancache);

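/*
 * This table can be __initdata on the assumption that
 * cleancache_register_ops() copies it rather than keeping the pointer;
 * the struct it returns is the previously registered ops, which
 * xen_tmem_init() inspects to warn when another backend is overridden.
 */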
static struct cleancache_ops __initdata tmem_cleancache_ops = {
	.put_page = tmem_cleancache_put_page,
	.get_page = tmem_cleancache_get_page,
	.invalidate_page = tmem_cleancache_flush_page,
	.invalidate_inode = tmem_cleancache_flush_inode,
	.invalidate_fs = tmem_cleancache_flush_fs,
	.init_shared_fs = tmem_cleancache_init_shared_fs,
	.init_fs = tmem_cleancache_init_fs
};
#endif

#ifdef CONFIG_FRONTSWAP
/* frontswap tmem operations */

/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
static int tmem_frontswap_poolid;

/*
 * Swizzling increases objects per swaptype, increasing tmem concurrency
 * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS
 */
#define SWIZ_BITS		4
#define SWIZ_MASK		((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind)	((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
#define iswiz(_ind)		(_ind >> SWIZ_BITS)
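/*
 * Worked example with SWIZ_BITS = 4: swap type 1, page offset 0x1234
 * swizzles to oid 0x14 (type in the high bits, offset low nibble 4) at
 * index 0x123, spreading consecutive offsets across 16 tmem objects.
 */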

static inline struct tmem_oid oswiz(unsigned type, u32 ind)
{
	struct tmem_oid oid = { .oid = { 0 } };

	oid.oid[0] = _oswiz(type, ind);
	return oid;
}

/* returns 0 if the page was successfully put into frontswap, -1 if not */
static int tmem_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	unsigned long pfn = page_to_pfn(page);
	int pool = tmem_frontswap_poolid;
	int ret;

	if (pool < 0)
		return -1;
	if (ind64 != ind)
		return -1;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), pfn);
	/* translate Xen tmem return values to linux semantics */
	if (ret == 1)
		return 0;
	else
		return -1;
}

/*
 * returns 0 if the page was successfully gotten from frontswap, -1 if
 * it was not present (should never happen!)
 */
static int tmem_frontswap_load(unsigned type, pgoff_t offset,
			       struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	unsigned long pfn = page_to_pfn(page);
	int pool = tmem_frontswap_poolid;
	int ret;

	if (pool < 0)
		return -1;
	if (ind64 != ind)
		return -1;
	ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), pfn);
	/* translate Xen tmem return values to linux semantics */
	if (ret == 1)
		return 0;
	else
		return -1;
}

/* flush a single page from frontswap */
static void tmem_frontswap_flush_page(unsigned type, pgoff_t offset)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	int pool = tmem_frontswap_poolid;

	if (pool < 0)
		return;
	if (ind64 != ind)
		return;
	(void) xen_tmem_flush_page(pool, oswiz(type, ind), iswiz(ind));
}

/* flush all pages from the passed swaptype */
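/*
 * oswiz() fans each swap device out across 2^SWIZ_BITS tmem objects, so
 * the whole area is flushed by walking all 16 possible oids for the type.
 */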
static void tmem_frontswap_flush_area(unsigned type)
{
	int pool = tmem_frontswap_poolid;
	int ind;

	if (pool < 0)
		return;
	for (ind = SWIZ_MASK; ind >= 0; ind--)
		(void)xen_tmem_flush_object(pool, oswiz(type, ind));
}

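/*
 * Frontswap pages must survive until explicitly flushed, hence
 * TMEM_POOL_PERSIST; the cleancache pools above are ephemeral, and the
 * hypervisor is free to drop their pages under memory pressure.
 */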
static void tmem_frontswap_init(unsigned ignored)
{
	struct tmem_pool_uuid private = TMEM_POOL_PRIVATE_UUID;

	/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
	if (tmem_frontswap_poolid < 0)
		tmem_frontswap_poolid =
		    xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
}

static bool __initdata use_frontswap = true;

static int __init no_frontswap(char *s)
{
	use_frontswap = false;
	return 1;
}
__setup("nofrontswap", no_frontswap);

static struct frontswap_ops __initdata tmem_frontswap_ops = {
	.store = tmem_frontswap_store,
	.load = tmem_frontswap_load,
	.invalidate_page = tmem_frontswap_flush_page,
	.invalidate_area = tmem_frontswap_flush_area,
	.init = tmem_frontswap_init
};
#endif

static int __init xen_tmem_init(void)
{
	if (!xen_domain())
		return 0;
#ifdef CONFIG_FRONTSWAP
	if (tmem_enabled && use_frontswap) {
		char *s = "";
		struct frontswap_ops old_ops =
			frontswap_register_ops(&tmem_frontswap_ops);

		tmem_frontswap_poolid = -1;
		if (old_ops.init != NULL)
			s = " (WARNING: frontswap_ops overridden)";
		printk(KERN_INFO "frontswap enabled, RAM provided by "
				 "Xen Transcendent Memory%s\n", s);
	}
#endif
#ifdef CONFIG_CLEANCACHE
	BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
	if (tmem_enabled && use_cleancache) {
		char *s = "";
		struct cleancache_ops old_ops =
			cleancache_register_ops(&tmem_cleancache_ops);
		if (old_ops.init_fs != NULL)
			s = " (WARNING: cleancache_ops overridden)";
		printk(KERN_INFO "cleancache enabled, RAM provided by "
				 "Xen Transcendent Memory%s\n", s);
	}
#endif
	return 0;
}

module_init(xen_tmem_init)