/*
 *  linux/arch/arm/mm/copypage-v6.c
 *
 *  Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
 | 10 | #include <linux/init.h> | 
 | 11 | #include <linux/spinlock.h> | 
 | 12 | #include <linux/mm.h> | 
| Russell King | 063b0a4 | 2008-10-31 15:08:35 +0000 | [diff] [blame] | 13 | #include <linux/highmem.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 15 | #include <asm/pgtable.h> | 
 | 16 | #include <asm/shmparam.h> | 
 | 17 | #include <asm/tlbflush.h> | 
 | 18 | #include <asm/cacheflush.h> | 
| Russell King | 46097c7 | 2008-08-10 18:10:19 +0100 | [diff] [blame] | 19 | #include <asm/cachetype.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 20 |  | 
| Russell King | 1b2e2b7 | 2006-08-21 17:06:38 +0100 | [diff] [blame] | 21 | #include "mm.h" | 
 | 22 |  | 
#if SHMLBA > 16384
#error FIX ME
#endif

/*
 * Fixed kernel virtual windows used to build temporary, cache-colour
 * aligned mappings of the source and destination pages.  Each window
 * is 16K so any CACHE_COLOUR() offset (SHMLBA <= 16384, checked above)
 * fits inside it.
 */
#define from_address	(0xffff8000)
#define to_address	(0xffffc000)

/* Serialises all use of the from_address/to_address PTE windows. */
static DEFINE_SPINLOCK(v6_lock);
 | 31 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 32 | /* | 
 | 33 |  * Copy the user page.  No aliasing to deal with so we can just | 
 | 34 |  * attack the kernel's existing mapping of these pages. | 
 | 35 |  */ | 
| Russell King | 063b0a4 | 2008-10-31 15:08:35 +0000 | [diff] [blame] | 36 | static void v6_copy_user_highpage_nonaliasing(struct page *to, | 
| Russell King | f00a75c | 2009-10-05 15:17:45 +0100 | [diff] [blame] | 37 | 	struct page *from, unsigned long vaddr, struct vm_area_struct *vma) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 38 | { | 
| Russell King | 063b0a4 | 2008-10-31 15:08:35 +0000 | [diff] [blame] | 39 | 	void *kto, *kfrom; | 
 | 40 |  | 
 | 41 | 	kfrom = kmap_atomic(from, KM_USER0); | 
 | 42 | 	kto = kmap_atomic(to, KM_USER1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 43 | 	copy_page(kto, kfrom); | 
| Nicolas Pitre | 7e5a69e | 2010-03-29 21:46:02 +0100 | [diff] [blame] | 44 | 	__cpuc_flush_dcache_area(kto, PAGE_SIZE); | 
| Russell King | 063b0a4 | 2008-10-31 15:08:35 +0000 | [diff] [blame] | 45 | 	kunmap_atomic(kto, KM_USER1); | 
 | 46 | 	kunmap_atomic(kfrom, KM_USER0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 47 | } | 
 | 48 |  | 
 | 49 | /* | 
 | 50 |  * Clear the user page.  No aliasing to deal with so we can just | 
 | 51 |  * attack the kernel's existing mapping of this page. | 
 | 52 |  */ | 
| Russell King | 303c644 | 2008-10-31 16:32:19 +0000 | [diff] [blame] | 53 | static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 54 | { | 
| Russell King | 303c644 | 2008-10-31 16:32:19 +0000 | [diff] [blame] | 55 | 	void *kaddr = kmap_atomic(page, KM_USER0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 56 | 	clear_page(kaddr); | 
| Russell King | 303c644 | 2008-10-31 16:32:19 +0000 | [diff] [blame] | 57 | 	kunmap_atomic(kaddr, KM_USER0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 58 | } | 
 | 59 |  | 
/*
 * Discard data in the kernel mapping for the new page.
 * FIXME: needs this MCRR to be supported.
 */
static void discard_old_kernel_data(void *kto)
{
	/*
	 * MCRR p15, 0, <end>, <start>, c6 invalidates the D-cache lines
	 * covering [kto, kto + PAGE_SIZE) without writing them back, so
	 * stale destination-page lines cannot mask the upcoming copy.
	 * End address is the start of the last cache line in the page.
	 */
	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
	   :
	   : "r" (kto),
	     "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
	   : "cc");
}
 | 72 |  | 
/*
 * Copy the page, taking account of the cache colour.
 */
static void v6_copy_user_highpage_aliasing(struct page *to,
	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
	/* Cache colour of the page's eventual userspace virtual address. */
	unsigned int offset = CACHE_COLOUR(vaddr);
	unsigned long kfrom, kto;

	/*
	 * If the source page may still have dirty cache lines
	 * (PG_dcache_clean not yet set), write them back now so the copy
	 * through the differently-coloured temporary mapping sees
	 * up-to-date data.
	 */
	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping(from), from);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(to));

	/*
	 * Now copy the page using the same cache colour as the
	 * pages ultimate destination.
	 */
	spin_lock(&v6_lock);

	/* Map source and destination at colour-matched window addresses. */
	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);

	kfrom = from_address + (offset << PAGE_SHIFT);
	kto   = to_address + (offset << PAGE_SHIFT);

	/* Drop any stale TLB entries for the windows before touching them. */
	flush_tlb_kernel_page(kfrom);
	flush_tlb_kernel_page(kto);

	copy_page((void *)kto, (void *)kfrom);

	spin_unlock(&v6_lock);
}
 | 107 |  | 
/*
 * Clear the user page.  We need to deal with the aliasing issues,
 * so remap the kernel page into the same cache colour as the user
 * page.
 */
static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
{
	unsigned int offset = CACHE_COLOUR(vaddr);
	/* Window address with the same cache colour as the user mapping. */
	unsigned long to = to_address + (offset << PAGE_SHIFT);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(page));

	/*
	 * Now clear the page using the same cache colour as
	 * the pages ultimate destination.
	 */
	spin_lock(&v6_lock);

	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
	/* Drop any stale TLB entry for the window before writing through it. */
	flush_tlb_kernel_page(to);
	clear_page((void *)to);

	spin_unlock(&v6_lock);
}
 | 133 |  | 
/*
 * Default page operations assume a non-aliasing cache; the initcall
 * below replaces them with the aliasing variants when needed.
 */
struct cpu_user_fns v6_user_fns __initdata = {
	.cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
	.cpu_copy_user_highpage	= v6_copy_user_highpage_nonaliasing,
};
 | 138 |  | 
 | 139 | static int __init v6_userpage_init(void) | 
 | 140 | { | 
 | 141 | 	if (cache_is_vipt_aliasing()) { | 
| Russell King | 303c644 | 2008-10-31 16:32:19 +0000 | [diff] [blame] | 142 | 		cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing; | 
| Russell King | 063b0a4 | 2008-10-31 15:08:35 +0000 | [diff] [blame] | 143 | 		cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 144 | 	} | 
 | 145 |  | 
 | 146 | 	return 0; | 
 | 147 | } | 
 | 148 |  | 
| Russell King | 08ee4e4 | 2005-05-10 17:30:47 +0100 | [diff] [blame] | 149 | core_initcall(v6_userpage_init); |