/*
 *  linux/arch/arm/lib/copypage-xsc3.S
 *
 *  Copyright (C) 2004 Intel Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Adapted for 3rd gen XScale core, no more mini-dcache
 * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/asm-offsets.h>

/*
 * General note:
 *  We don't really want write-allocate cache behaviour for these functions
 *  since that will just eat through 8K of the cache.
 */
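/*
 * To that end, the loops below invalidate each destination cache line
 * just before it is written: the strds then miss in the D-cache and
 * stream out through the write buffer instead of displacing live lines.
 */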

	.text
	.align	5
/*
 * XSC3 optimised copy_user_page
 *  r0 = destination
 *  r1 = source
 *  r2 = virtual user address of ultimate destination page
 *
 * The source page may have some clean entries in the cache already, but we
 * can safely ignore them - break_cow() will flush them out of the cache
 * if we eventually end up using our copied page.
 */
ENTRY(xsc3_mc_copy_user_page)
	stmfd	sp!, {r4, r5, lr}
	mov	lr, #PAGE_SZ/64-1
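	@ lr counts 64-byte chunks.  All but the last chunk loop through
	@ 1: below, keeping the plds two cache lines ahead of the loads;
	@ the last chunk re-enters at 2: (via the beq) so we never
	@ prefetch past the end of the source page.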

	pld	[r1, #0]
	pld	[r1, #32]
1:	pld	[r1, #64]
	pld	[r1, #96]

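	@ Each pass copies one 64-byte chunk as two 32-byte cache lines.
	@ ip snapshots r0 before each line so the mcr can invalidate the
	@ exact destination line the following strds will fill; loads and
	@ stores are interleaved to hide ldrd result latency.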
2:	ldrd	r2, [r1], #8
	mov	ip, r0
	ldrd	r4, [r1], #8
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate
	strd	r2, [r0], #8
	ldrd	r2, [r1], #8
	strd	r4, [r0], #8
	ldrd	r4, [r1], #8
	strd	r2, [r0], #8
	strd	r4, [r0], #8
	ldrd	r2, [r1], #8
	mov	ip, r0
	ldrd	r4, [r1], #8
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate
	strd	r2, [r0], #8
	ldrd	r2, [r1], #8
	subs	lr, lr, #1
	strd	r4, [r0], #8
	ldrd	r4, [r1], #8
	strd	r2, [r0], #8
	strd	r4, [r0], #8
	bgt	1b
	beq	2b
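	@ Fall through once both branches fail: the whole page is copied.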

	ldmfd	sp!, {r4, r5, pc}

	.align	5
/*
 * XSC3 optimised clear_user_page
 *  r0 = destination
 *  r1 = virtual user address of ultimate destination page
 */
ENTRY(xsc3_mc_clear_user_page)
	mov	r1, #PAGE_SZ/32
	mov	r2, #0
	mov	r3, #0
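	@ r1 counts 32-byte cache lines; r2/r3 form the zero doubleword
	@ stored by each strd below.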
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate line
	strd	r2, [r0], #8
	strd	r2, [r0], #8
	strd	r2, [r0], #8
	strd	r2, [r0], #8
	subs	r1, r1, #1
	bne	1b
	mov	pc, lr

	__INITDATA

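/*
 * Function table matching struct cpu_user_fns (clear_user_page first,
 * then copy_user_page, as in asm/page.h); boot-time processor
 * selection copies this into cpu_user.
 */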
	.type	xsc3_mc_user_fns, #object
ENTRY(xsc3_mc_user_fns)
	.long	xsc3_mc_clear_user_page
	.long	xsc3_mc_copy_user_page
	.size	xsc3_mc_user_fns, . - xsc3_mc_user_fns