/*
 *  linux/arch/m68k/mm/memory.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pagemap.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/machdep.h>


/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */

typedef struct list_head ptable_desc;
static LIST_HEAD(ptable_list);

#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
#define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index)

#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
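
/*
 * Each page hosts PAGE_SIZE/PTABLE_SIZE == 8 pointer tables, and
 * PD_MARKBITS() is an 8-bit free mask kept in the otherwise unused
 * ->index field of the backing struct page: bit i set means table i
 * in the page is free.  For example, a fresh page just carved up for
 * its first table reads 0xfe (table 0 in use, tables 1-7 free), and
 * a page whose tables are all free again reads 0xff and can be given
 * back to the page allocator.
 */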

void __init init_pointer_table(unsigned long ptable)
{
	ptable_desc *dp;
	unsigned long page = ptable & PAGE_MASK;
	unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);

	dp = PD_PTABLE(page);
	if (!(PD_MARKBITS(dp) & mask)) {
		PD_MARKBITS(dp) = 0xff;
		list_add(dp, &ptable_list);
	}

	PD_MARKBITS(dp) &= ~mask;
#ifdef DEBUG
	printk("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
#endif

	/* unreserve the page so it's possible to free that page */
	PD_PAGE(dp)->flags &= ~(1 << PG_reserved);
	set_page_count(PD_PAGE(dp), 1);
}

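/*
 * Allocate one pointer table, first-fit from the front of
 * ptable_list.  Pages that still have free tables are kept at the
 * front of the list; once a page's last table has been handed out,
 * the page is moved to the tail so the next allocation does not have
 * to skip over it.
 */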
pmd_t *get_pointer_table(void)
{
	ptable_desc *dp = ptable_list.next;
	unsigned char mask = PD_MARKBITS(dp);
	unsigned char tmp;
	unsigned int off;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose.  Each
	 * page can hold 8 pointer tables.  The page is remapped in
	 * virtual address space to be noncacheable.
	 */
	if (mask == 0) {
		void *page;
		ptable_desc *new;

		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
			return NULL;

		flush_tlb_kernel_page(page);
		nocache_page(page);

		new = PD_PTABLE(page);
		PD_MARKBITS(new) = 0xfe;
		list_add_tail(new, dp);

		return (pmd_t *)page;
	}

	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE)
		;
	PD_MARKBITS(dp) = mask & ~tmp;
	if (!PD_MARKBITS(dp)) {
		/* move to end of list */
		list_del(dp);
		list_add_tail(dp, &ptable_list);
	}
	return (pmd_t *)(page_address(PD_PAGE(dp)) + off);
}
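
/*
 * Usage sketch (hypothetical caller; in the kernel these are reached
 * through the pmd allocation hooks in <asm/pgalloc.h>):
 *
 *	pmd_t *pmd = get_pointer_table();
 *	if (!pmd)
 *		return -ENOMEM;
 *	...
 *	free_pointer_table(pmd);
 */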

int free_pointer_table(pmd_t *ptable)
{
	ptable_desc *dp;
	unsigned long page = (unsigned long)ptable & PAGE_MASK;
	unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);

	dp = PD_PTABLE(page);
	if (PD_MARKBITS(dp) & mask)
		panic("table already free!");

	PD_MARKBITS(dp) |= mask;

	if (PD_MARKBITS(dp) == 0xff) {
		/* all tables in page are free, free page */
		list_del(dp);
		cache_page((void *)page);
		free_page(page);
		return 1;
	} else if (ptable_list.next != dp) {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		list_del(dp);
		list_add(dp, &ptable_list);
	}
	return 0;
}

#ifdef DEBUG_INVALID_PTOV
int mm_inv_cnt = 5;
#endif

#ifndef CONFIG_SINGLE_MEMORY_CHUNK
/*
 * The following two routines map between kernel virtual addresses
 * and physical addresses across the (possibly discontiguous) memory
 * chunks recorded in m68k_memory[].
 */
unsigned long mm_vtop(unsigned long vaddr)
{
	int i = 0;
	unsigned long voff = vaddr - PAGE_OFFSET;

	do {
		if (voff < m68k_memory[i].size) {
#ifdef DEBUGPV
			printk("VTOP(%lx)=%lx\n", vaddr,
				m68k_memory[i].addr + voff);
#endif
			return m68k_memory[i].addr + voff;
		}
		voff -= m68k_memory[i].size;
	} while (++i < m68k_num_memory);

	/* As a special case allow `__pa(high_memory)'.  */
	if (voff == 0)
		return m68k_memory[i-1].addr + m68k_memory[i-1].size;

	return -1;
}
#endif

#ifndef CONFIG_SINGLE_MEMORY_CHUNK
unsigned long mm_ptov(unsigned long paddr)
{
	int i = 0;
	unsigned long poff, voff = PAGE_OFFSET;

	do {
		poff = paddr - m68k_memory[i].addr;
		if (poff < m68k_memory[i].size) {
#ifdef DEBUGPV
			printk("PTOV(%lx)=%lx\n", paddr, poff + voff);
#endif
			return poff + voff;
		}
		voff += m68k_memory[i].size;
	} while (++i < m68k_num_memory);

#ifdef DEBUG_INVALID_PTOV
	if (mm_inv_cnt > 0) {
		mm_inv_cnt--;
		printk("Invalid use of phys_to_virt(0x%lx) at 0x%p!\n",
			paddr, __builtin_return_address(0));
	}
#endif
	return -1;
}
#endif
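
/*
 * Worked example (hypothetical two-chunk layout): with
 * m68k_memory[0] = { .addr = 0x00000000, .size = 0x01000000 } and
 * m68k_memory[1] = { .addr = 0x08000000, .size = 0x00400000 },
 * virtual addresses are laid out contiguously from PAGE_OFFSET, so
 * mm_vtop(PAGE_OFFSET + 0x01000000) falls past chunk 0 after one
 * iteration (voff -= 0x01000000) and resolves to physical
 * 0x08000000, the start of chunk 1.  mm_ptov() walks the same table
 * in the opposite direction, accumulating chunk sizes into voff.
 */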

/* invalidate page in both caches */
static inline void clear040(unsigned long paddr)
{
	asm volatile (
		"nop\n\t"
		".chip 68040\n\t"
		"cinvp %%bc,(%0)\n\t"
		".chip 68k"
		: : "a" (paddr));
}

/* invalidate page in i-cache */
static inline void cleari040(unsigned long paddr)
{
	asm volatile (
		"nop\n\t"
		".chip 68040\n\t"
		"cinvp %%ic,(%0)\n\t"
		".chip 68k"
		: : "a" (paddr));
}

/* push page in both caches */
/* RZ: cpush %bc DOES invalidate %ic, regardless of DPI */
static inline void push040(unsigned long paddr)
{
	asm volatile (
		"nop\n\t"
		".chip 68040\n\t"
		"cpushp %%bc,(%0)\n\t"
		".chip 68k"
		: : "a" (paddr));
}

/* push and invalidate page in both caches; interrupts must be
 * disabled to avoid invalidating valid data */
static inline void pushcl040(unsigned long paddr)
{
	unsigned long flags;

	local_irq_save(flags);
	push040(paddr);
	if (CPU_IS_060)
		clear040(paddr);
	local_irq_restore(flags);
}

/*
 * 040: Hit every page containing an address in the range paddr..paddr+len-1.
 * (Low order bits of the ea of a CINVP/CPUSHP are "don't care"s).
 * Hit every page until there is a page or less to go. Hit the next page,
 * and the one after that if the range hits it.
 */
/* ++roman: A little bit more care is required here: The CINVP instruction
 * invalidates cache entries WITHOUT WRITING DIRTY DATA BACK! So the beginning
 * and the end of the region must be treated differently if they are not
 * exactly at a page boundary.  Otherwise, too much data may be invalidated
 * and thus lost forever.  CPUSHP does what we need: it invalidates the page
 * after pushing dirty data to memory.  (Thanks to Jes for discovering the
 * problem!)
 */
/* ... but on the '060, CPUSH doesn't invalidate (for us, since we have set
 * the DPI bit in the CACR; would it cause problems to change this
 * temporarily?).  So we have to push first and then invalidate separately.
 */
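/* Concrete example of the partial-page handling in cache_clear()
 * below (assuming 4 KB pages): cache_clear(0x1080, 0x2000) must not
 * CINVP the partially covered first page (0x1000-0x1fff) or last page
 * (0x3000-0x3fff), since that would throw away dirty data outside the
 * range; those two pages go through pushcl040(), which writes dirty
 * lines back before invalidating, and only the fully covered page at
 * 0x2000 is cheaply invalidated with clear040().
 */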


/*
 * cache_clear() semantics: Clear any cache entries for the area in question,
 * without writing back dirty entries first. This is useful if the data will
 * be overwritten anyway, e.g. by DMA to memory. The range is defined by a
 * _physical_ address.
 */

void cache_clear(unsigned long paddr, int len)
{
    if (CPU_IS_040_OR_060) {
	int tmp;

	/*
	 * We need special treatment for the first page, in case it
	 * is not page-aligned. Page align the addresses to work
	 * around bug I17 in the 68060.
	 */
	if ((tmp = -paddr & (PAGE_SIZE - 1))) {
	    pushcl040(paddr & PAGE_MASK);
	    if ((len -= tmp) <= 0)
		return;
	    paddr += tmp;
	}
	tmp = PAGE_SIZE;
	paddr &= PAGE_MASK;
	while ((len -= tmp) >= 0) {
	    clear040(paddr);
	    paddr += tmp;
	}
	if ((len += tmp))
	    /* a page boundary gets crossed at the end */
	    pushcl040(paddr);
    }
    else /* 68030 or 68020 */
	asm volatile ("movec %/cacr,%/d0\n\t"
		      "oriw %0,%/d0\n\t"
		      "movec %/d0,%/cacr"
		      : : "i" (FLUSH_I_AND_D)
		      : "d0");
#ifdef CONFIG_M68K_L2_CACHE
    if (mach_l2_flush)
	mach_l2_flush(0);
#endif
}
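
/*
 * Usage sketch (hypothetical driver code, not part of this file):
 * before a device DMAs *into* a buffer there is no point in writing
 * the CPU's soon-to-be-stale lines back, so the range is simply
 * invalidated:
 *
 *	unsigned long paddr = virt_to_phys(buf);
 *	cache_clear(paddr, len);
 *	start_dma_to_memory(paddr, len);	// hypothetical helper
 */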


/*
 * cache_push() semantics: Write back any dirty cache data in the given area,
 * and invalidate the range in the instruction cache. It need not (but may)
 * also invalidate those entries in the data cache. The range is defined by a
 * _physical_ address.
 */

void cache_push(unsigned long paddr, int len)
{
    if (CPU_IS_040_OR_060) {
	int tmp = PAGE_SIZE;

	/*
	 * on 68040 or 68060, push cache lines for pages in the range;
	 * on the '040 this also invalidates the pushed lines, but not on
	 * the '060!
	 */
	len += paddr & (PAGE_SIZE - 1);

	/*
	 * Work around bug I17 in the 68060 affecting some instruction
	 * lines not being invalidated properly.
	 */
	paddr &= PAGE_MASK;

	do {
	    push040(paddr);
	    paddr += tmp;
	} while ((len -= tmp) > 0);
    }
    /*
     * 68030/68020 have no writeback cache. On the other hand,
     * cache_push is actually a superset of cache_clear (the lines
     * get written back and invalidated), so we should make sure
     * to perform the corresponding actions. After all, this is getting
     * called in places where we've just loaded code, or whatever, so
     * flushing the icache is appropriate; flushing the dcache shouldn't
     * be required.
     */
    else /* 68030 or 68020 */
	asm volatile ("movec %/cacr,%/d0\n\t"
		      "oriw %0,%/d0\n\t"
		      "movec %/d0,%/cacr"
		      : : "i" (FLUSH_I)
		      : "d0");
#ifdef CONFIG_M68K_L2_CACHE
    if (mach_l2_flush)
	mach_l2_flush(1);
#endif
}
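
/*
 * Usage sketch (hypothetical, mirroring the cache_clear() example
 * above): before a device DMAs *out of* memory, dirty lines must be
 * written back so the device sees the CPU's latest data:
 *
 *	unsigned long paddr = virt_to_phys(buf);
 *	cache_push(paddr, len);
 *	start_dma_from_memory(paddr, len);	// hypothetical helper
 */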
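/*
 * Translate a kernel virtual address to a physical address by asking
 * the MMU directly: PLPAR on the '060 (with an exception-table fixup
 * for untranslatable addresses), PTESTR plus the MMUSR register on
 * the '040, and PTESTR/PMOVE with a read of the returned descriptor
 * on the '020/'030.  Returns 0 if no translation exists.
 */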
static unsigned long virt_to_phys_slow(unsigned long vaddr)
{
	if (CPU_IS_060) {
		mm_segment_t fs = get_fs();
		unsigned long paddr;

		set_fs(get_ds());

		/* The PLPAR instruction causes an access error if the translation
		 * is not possible. To catch this we use the same exception mechanism
		 * as for user space accesses in <asm/uaccess.h>. */
		asm volatile (".chip 68060\n"
			      "1: plpar (%0)\n"
			      ".chip 68k\n"
			      "2:\n"
			      ".section .fixup,\"ax\"\n"
			      "   .even\n"
			      "3: sub.l %0,%0\n"
			      "   jra 2b\n"
			      ".previous\n"
			      ".section __ex_table,\"a\"\n"
			      "   .align 4\n"
			      "   .long 1b,3b\n"
			      ".previous"
			      : "=a" (paddr)
			      : "0" (vaddr));
		set_fs(fs);
		return paddr;
	} else if (CPU_IS_040) {
		mm_segment_t fs = get_fs();
		unsigned long mmusr;

		set_fs(get_ds());

		asm volatile (".chip 68040\n\t"
			      "ptestr (%1)\n\t"
			      "movec %%mmusr, %0\n\t"
			      ".chip 68k"
			      : "=r" (mmusr)
			      : "a" (vaddr));
		set_fs(fs);

		if (mmusr & MMU_R_040)
			return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
	} else {
		unsigned short mmusr;
		unsigned long *descaddr;

		asm volatile ("ptestr #5,%2@,#7,%0\n\t"
			      "pmove %%psr,%1@"
			      : "=a&" (descaddr)
			      : "a" (&mmusr), "a" (vaddr));
		if (mmusr & (MMU_I|MMU_B|MMU_L))
			return 0;
		descaddr = phys_to_virt((unsigned long)descaddr);
		switch (mmusr & MMU_NUM) {
		case 1:
			return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
		case 2:
			return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
		case 3:
			return (*descaddr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
		}
	}
	return 0;
}

/* Push the pages covering the given kernel virtual address range to
 * memory and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
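/*
 * On the '040/'060 each page in the range is pushed with CPUSHP %bc,
 * which needs a physical address: addresses inside the directly
 * mapped kernel range (PAGE_OFFSET..high_memory) translate with the
 * fast virt_to_phys(), anything else goes through virt_to_phys_slow()
 * above.  Older CPUs just flush the whole icache via the CACR.
 */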
void flush_icache_range(unsigned long address, unsigned long endaddr)
{
	if (CPU_IS_040_OR_060) {
		address &= PAGE_MASK;

		if (address >= PAGE_OFFSET && address < (unsigned long)high_memory) {
			do {
				asm volatile ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpushp %%bc,(%0)\n\t"
					      ".chip 68k"
					      : : "a" (virt_to_phys((void *)address)));
				address += PAGE_SIZE;
			} while (address < endaddr);
		} else {
			do {
				asm volatile ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpushp %%bc,(%0)\n\t"
					      ".chip 68k"
					      : : "a" (virt_to_phys_slow(address)));
				address += PAGE_SIZE;
			} while (address < endaddr);
		}
	} else {
		unsigned long tmp;
		asm volatile ("movec %%cacr,%0\n\t"
			      "orw %1,%0\n\t"
			      "movec %0,%%cacr"
			      : "=&d" (tmp)
			      : "di" (FLUSH_I));
	}
}
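
/*
 * A typical (hypothetical) caller is code that has just written
 * instructions into memory, e.g. a module loader or trampoline
 * generator, and must make the icache coherent before jumping there:
 *
 *	memcpy(code_buf, insns, insn_len);
 *	flush_icache_range((unsigned long)code_buf,
 *			   (unsigned long)code_buf + insn_len);
 */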


#ifndef CONFIG_SINGLE_MEMORY_CHUNK
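/*
 * Return 1 if addr+len lines up exactly with the end of one of the
 * memory chunks, i.e. the range cannot be extended within the same
 * chunk; 0 otherwise.
 */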
int mm_end_of_chunk(unsigned long addr, int len)
{
	int i;

	for (i = 0; i < m68k_num_memory; i++)
		if (m68k_memory[i].addr + m68k_memory[i].size == addr + len)
			return 1;
	return 0;
}
#endif