| Eric Miao | 905a09d | 2008-06-06 16:34:03 +0800 | [diff] [blame] | 1 | /* | 
|  | 2 | * arch/arm/mm/cache-xsc3l2.c - XScale3 L2 cache controller support | 
|  | 3 | * | 
|  | 4 | * Copyright (C) 2007 ARM Limited | 
|  | 5 | * | 
|  | 6 | * This program is free software; you can redistribute it and/or modify | 
|  | 7 | * it under the terms of the GNU General Public License version 2 as | 
|  | 8 | * published by the Free Software Foundation. | 
|  | 9 | * | 
|  | 10 | * This program is distributed in the hope that it will be useful, | 
|  | 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 
|  | 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | 
|  | 13 | * GNU General Public License for more details. | 
|  | 14 | * | 
|  | 15 | * You should have received a copy of the GNU General Public License | 
|  | 16 | * along with this program; if not, write to the Free Software | 
|  | 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 
|  | 18 | */ | 
|  | 19 | #include <linux/init.h> | 
|  | 20 | #include <linux/spinlock.h> | 
| Russell King | 0ba8b9b2 | 2008-08-10 18:08:10 +0100 | [diff] [blame] | 21 | #include <linux/io.h> | 
| Eric Miao | 905a09d | 2008-06-06 16:34:03 +0800 | [diff] [blame] | 22 |  | 
|  | 23 | #include <asm/system.h> | 
| Russell King | 0ba8b9b2 | 2008-08-10 18:08:10 +0100 | [diff] [blame] | 24 | #include <asm/cputype.h> | 
| Eric Miao | 905a09d | 2008-06-06 16:34:03 +0800 | [diff] [blame] | 25 | #include <asm/cacheflush.h> | 
| Eric Miao | 905a09d | 2008-06-06 16:34:03 +0800 | [diff] [blame] | 26 |  | 
#define CR_L2	(1 << 26)	/* L2 cache enable bit in the CP15 control register */

#define CACHE_LINE_SIZE		32	/* L2 cache line size in bytes */
#define CACHE_LINE_SHIFT	5	/* log2(CACHE_LINE_SIZE) */
#define CACHE_WAY_PER_SET	8	/* L2 associativity (ways per set) */

/*
 * Way size in bytes, decoded from the L2 cache type register value
 * (size field in bits [11:8], in units of 8KB).
 */
#define CACHE_WAY_SIZE(l2ctype)	(8192 << (((l2ctype) >> 8) & 0xf))
/* Number of sets: one cache line per way in each set. */
#define CACHE_SET_SIZE(l2ctype)	(CACHE_WAY_SIZE(l2ctype) >> CACHE_LINE_SHIFT)
|  | 35 |  | 
/*
 * Report whether an L2 cache is fitted, by checking the size/way
 * fields of the L2 cache type register.
 */
static inline int xsc3_l2_present(void)
{
	unsigned long cache_type;

	/* Read the L2 cache type register (CP15, opc1=1, c0/c0/1). */
	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_type));

	return (cache_type & 0xf8) != 0;
}
|  | 44 |  | 
/*
 * Clean (write back) one L2 cache line, identified by its modified
 * virtual address, via the XSC3 L2 maintenance op (CP15 c7/c11/1).
 */
static inline void xsc3_l2_clean_mva(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c11, 1" : : "r" (addr));
}
|  | 49 |  | 
/*
 * Clean the L2 line holding physical address @addr by converting it
 * back to its kernel direct-mapped virtual address.
 *
 * NOTE(review): __phys_to_virt() is only valid for direct-mapped
 * (lowmem) memory — highmem pages would need a temporary mapping;
 * confirm callers never pass highmem physical addresses.
 */
static inline void xsc3_l2_clean_pa(unsigned long addr)
{
	xsc3_l2_clean_mva(__phys_to_virt(addr));
}
|  | 54 |  | 
/*
 * Invalidate (discard) one L2 cache line, identified by its modified
 * virtual address, via the XSC3 L2 maintenance op (CP15 c7/c7/1).
 * Any dirty data in the line is lost.
 */
static inline void xsc3_l2_inv_mva(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c7, 1" : : "r" (addr));
}
|  | 59 |  | 
/*
 * Invalidate the L2 line holding physical address @addr by converting
 * it back to its kernel direct-mapped virtual address.
 *
 * NOTE(review): as with xsc3_l2_clean_pa(), __phys_to_virt() only
 * works for direct-mapped (lowmem) memory — TODO confirm no highmem
 * physical addresses reach this path.
 */
static inline void xsc3_l2_inv_pa(unsigned long addr)
{
	xsc3_l2_inv_mva(__phys_to_virt(addr));
}
|  | 64 |  | 
|  | 65 | static inline void xsc3_l2_inv_all(void) | 
|  | 66 | { | 
|  | 67 | unsigned long l2ctype, set_way; | 
|  | 68 | int set, way; | 
|  | 69 |  | 
|  | 70 | __asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype)); | 
|  | 71 |  | 
|  | 72 | for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) { | 
|  | 73 | for (way = 0; way < CACHE_WAY_PER_SET; way++) { | 
|  | 74 | set_way = (way << 29) | (set << 5); | 
|  | 75 | __asm__("mcr p15, 1, %0, c7, c11, 2" : : "r"(set_way)); | 
|  | 76 | } | 
|  | 77 | } | 
|  | 78 |  | 
|  | 79 | dsb(); | 
|  | 80 | } | 
|  | 81 |  | 
|  | 82 | static void xsc3_l2_inv_range(unsigned long start, unsigned long end) | 
|  | 83 | { | 
|  | 84 | if (start == 0 && end == -1ul) { | 
|  | 85 | xsc3_l2_inv_all(); | 
|  | 86 | return; | 
|  | 87 | } | 
|  | 88 |  | 
|  | 89 | /* | 
|  | 90 | * Clean and invalidate partial first cache line. | 
|  | 91 | */ | 
|  | 92 | if (start & (CACHE_LINE_SIZE - 1)) { | 
|  | 93 | xsc3_l2_clean_pa(start & ~(CACHE_LINE_SIZE - 1)); | 
|  | 94 | xsc3_l2_inv_pa(start & ~(CACHE_LINE_SIZE - 1)); | 
|  | 95 | start = (start | (CACHE_LINE_SIZE - 1)) + 1; | 
|  | 96 | } | 
|  | 97 |  | 
|  | 98 | /* | 
|  | 99 | * Clean and invalidate partial last cache line. | 
|  | 100 | */ | 
| Dan Williams | c7cf72d | 2008-11-06 17:43:55 -0700 | [diff] [blame] | 101 | if (start < end && (end & (CACHE_LINE_SIZE - 1))) { | 
| Eric Miao | 905a09d | 2008-06-06 16:34:03 +0800 | [diff] [blame] | 102 | xsc3_l2_clean_pa(end & ~(CACHE_LINE_SIZE - 1)); | 
|  | 103 | xsc3_l2_inv_pa(end & ~(CACHE_LINE_SIZE - 1)); | 
|  | 104 | end &= ~(CACHE_LINE_SIZE - 1); | 
|  | 105 | } | 
|  | 106 |  | 
|  | 107 | /* | 
|  | 108 | * Invalidate all full cache lines between 'start' and 'end'. | 
|  | 109 | */ | 
| Dan Williams | c7cf72d | 2008-11-06 17:43:55 -0700 | [diff] [blame] | 110 | while (start < end) { | 
| Eric Miao | 905a09d | 2008-06-06 16:34:03 +0800 | [diff] [blame] | 111 | xsc3_l2_inv_pa(start); | 
|  | 112 | start += CACHE_LINE_SIZE; | 
|  | 113 | } | 
|  | 114 |  | 
|  | 115 | dsb(); | 
|  | 116 | } | 
|  | 117 |  | 
|  | 118 | static void xsc3_l2_clean_range(unsigned long start, unsigned long end) | 
|  | 119 | { | 
|  | 120 | start &= ~(CACHE_LINE_SIZE - 1); | 
|  | 121 | while (start < end) { | 
|  | 122 | xsc3_l2_clean_pa(start); | 
|  | 123 | start += CACHE_LINE_SIZE; | 
|  | 124 | } | 
|  | 125 |  | 
|  | 126 | dsb(); | 
|  | 127 | } | 
|  | 128 |  | 
|  | 129 | /* | 
|  | 130 | * optimize L2 flush all operation by set/way format | 
|  | 131 | */ | 
|  | 132 | static inline void xsc3_l2_flush_all(void) | 
|  | 133 | { | 
|  | 134 | unsigned long l2ctype, set_way; | 
|  | 135 | int set, way; | 
|  | 136 |  | 
|  | 137 | __asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype)); | 
|  | 138 |  | 
|  | 139 | for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) { | 
|  | 140 | for (way = 0; way < CACHE_WAY_PER_SET; way++) { | 
|  | 141 | set_way = (way << 29) | (set << 5); | 
|  | 142 | __asm__("mcr p15, 1, %0, c7, c15, 2" : : "r"(set_way)); | 
|  | 143 | } | 
|  | 144 | } | 
|  | 145 |  | 
|  | 146 | dsb(); | 
|  | 147 | } | 
|  | 148 |  | 
|  | 149 | static void xsc3_l2_flush_range(unsigned long start, unsigned long end) | 
|  | 150 | { | 
|  | 151 | if (start == 0 && end == -1ul) { | 
|  | 152 | xsc3_l2_flush_all(); | 
|  | 153 | return; | 
|  | 154 | } | 
|  | 155 |  | 
|  | 156 | start &= ~(CACHE_LINE_SIZE - 1); | 
|  | 157 | while (start < end) { | 
|  | 158 | xsc3_l2_clean_pa(start); | 
|  | 159 | xsc3_l2_inv_pa(start); | 
|  | 160 | start += CACHE_LINE_SIZE; | 
|  | 161 | } | 
|  | 162 |  | 
|  | 163 | dsb(); | 
|  | 164 | } | 
|  | 165 |  | 
|  | 166 | static int __init xsc3_l2_init(void) | 
|  | 167 | { | 
|  | 168 | if (!cpu_is_xsc3() || !xsc3_l2_present()) | 
|  | 169 | return 0; | 
|  | 170 |  | 
|  | 171 | if (!(get_cr() & CR_L2)) { | 
|  | 172 | pr_info("XScale3 L2 cache enabled.\n"); | 
|  | 173 | adjust_cr(CR_L2, CR_L2); | 
|  | 174 | xsc3_l2_inv_all(); | 
|  | 175 | } | 
|  | 176 |  | 
|  | 177 | outer_cache.inv_range = xsc3_l2_inv_range; | 
|  | 178 | outer_cache.clean_range = xsc3_l2_clean_range; | 
|  | 179 | outer_cache.flush_range = xsc3_l2_flush_range; | 
|  | 180 |  | 
|  | 181 | return 0; | 
|  | 182 | } | 
|  | 183 | core_initcall(xsc3_l2_init); |