blob: 95a7079956a9cf2f24ed2de44ba6cf99215200e7 [file] [log] [blame]
Catalin Marinas382266a2007-02-05 14:48:19 +01001/*
2 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
3 *
4 * Copyright (C) 2007 ARM Limited
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005 * Copyright (c) 2009, 2011, Code Aurora Forum. All rights reserved.
Catalin Marinas382266a2007-02-05 14:48:19 +01006 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include <linux/init.h>
Catalin Marinas07620972007-07-20 11:42:40 +010021#include <linux/spinlock.h>
Russell Kingfced80c2008-09-06 12:10:45 +010022#include <linux/io.h>
Catalin Marinas382266a2007-02-05 14:48:19 +010023
24#include <asm/cacheflush.h>
Catalin Marinas382266a2007-02-05 14:48:19 +010025#include <asm/hardware/cache-l2x0.h>
26
#define CACHE_LINE_SIZE		32	/* L2x0 cache line size in bytes */

static void __iomem *l2x0_base;		/* virtual base of controller registers */
static uint32_t aux_ctrl_save;		/* AUX_CTRL value saved by l2x0_suspend() */
static uint32_t data_latency_ctrl;	/* DATA_LATENCY_CTRL saved by l2x0_suspend() */
static DEFINE_SPINLOCK(l2x0_lock);	/* serialises cache maintenance sequences */
static uint32_t l2x0_way_mask;		/* Bitmask of active ways */
static uint32_t l2x0_size;		/* total cache size in bytes (ways * way size) */
Catalin Marinas382266a2007-02-05 14:48:19 +010035
Catalin Marinas9a6655e2010-08-31 13:05:22 +010036static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
Catalin Marinas382266a2007-02-05 14:48:19 +010037{
Catalin Marinas9a6655e2010-08-31 13:05:22 +010038 /* wait for cache operation by line or way to complete */
Catalin Marinas6775a552010-07-28 22:01:25 +010039 while (readl_relaxed(reg) & mask)
Catalin Marinas382266a2007-02-05 14:48:19 +010040 ;
Catalin Marinas382266a2007-02-05 14:48:19 +010041}
42
#ifdef CONFIG_CACHE_PL310
/* No-op variant: line operations do not need polling on PL310. */
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
/* Earlier L2x0 parts must poll for line-op completion, same as way ops. */
#define cache_wait	cache_wait_way
#endif
51
/*
 * Issue a cache sync and wait for it to complete, making previously
 * issued maintenance operations visible to all masters.
 */
static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

#ifdef CONFIG_ARM_ERRATA_753970
	/* write to an unmapped register instead of CACHE_SYNC (erratum 753970) */
	writel_relaxed(0, base + L2X0_DUMMY_REG);
#else
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
#endif
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}
64
Santosh Shilimkar424d6b12010-02-04 19:35:06 +010065static inline void l2x0_clean_line(unsigned long addr)
66{
67 void __iomem *base = l2x0_base;
68 cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
Catalin Marinas6775a552010-07-28 22:01:25 +010069 writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
Santosh Shilimkar424d6b12010-02-04 19:35:06 +010070}
71
72static inline void l2x0_inv_line(unsigned long addr)
73{
74 void __iomem *base = l2x0_base;
75 cache_wait(base + L2X0_INV_LINE_PA, 1);
Catalin Marinas6775a552010-07-28 22:01:25 +010076 writel_relaxed(addr, base + L2X0_INV_LINE_PA);
Santosh Shilimkar424d6b12010-02-04 19:35:06 +010077}
78
#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)

/* Errata builds route debug-control writes through the outer_cache hook. */
#define debug_writel(val)	outer_cache.set_debug(val)

/* Write @val to the PL310 debug control register. */
static void l2x0_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define l2x0_set_debug	NULL
#endif
95
#ifdef CONFIG_PL310_ERRATA_588369
/*
 * Erratum 588369 workaround: instead of the combined clean+invalidate
 * operation, issue a separate Clean by PA followed by Invalidate by PA.
 */
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

/* Clean and invalidate one cache line by physical address. */
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif
Santosh Shilimkar424d6b12010-02-04 19:35:06 +0100116
/*
 * outer_cache.sync hook: drain the controller's buffers.
 * NOTE(review): no l2x0_lock is taken here, unlike the range operations —
 * presumably a lone cache sync is safe to issue concurrently; verify.
 */
void l2x0_cache_sync(void)
{
	cache_sync();
}
121
/*
 * Clean and invalidate all ways.  Caller must hold l2x0_lock (both
 * l2x0_flush_all() and l2x0_disable() call this under the lock).
 */
static void __l2x0_flush_all(void)
{
	debug_writel(0x03);	/* errata workaround: enter debug mode */
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);	/* leave debug mode */
}
130
Thomas Gleixner2fd86582010-07-31 21:05:24 +0530131static void l2x0_flush_all(void)
132{
133 unsigned long flags;
134
135 /* clean all ways */
136 spin_lock_irqsave(&l2x0_lock, flags);
Will Deacon38a89142011-07-01 14:36:19 +0100137 __l2x0_flush_all();
Thomas Gleixner2fd86582010-07-31 21:05:24 +0530138 spin_unlock_irqrestore(&l2x0_lock, flags);
139}
140
Santosh Shilimkar444457c2010-07-11 14:58:41 +0530141static void l2x0_clean_all(void)
142{
143 unsigned long flags;
144
145 /* clean all ways */
146 spin_lock_irqsave(&l2x0_lock, flags);
147 writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
148 cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
149 cache_sync();
150 spin_unlock_irqrestore(&l2x0_lock, flags);
151}
152
Thomas Gleixner2fd86582010-07-31 21:05:24 +0530153static void l2x0_inv_all(void)
Catalin Marinas382266a2007-02-05 14:48:19 +0100154{
Russell King0eb948d2009-11-19 11:12:15 +0000155 unsigned long flags;
156
Catalin Marinas382266a2007-02-05 14:48:19 +0100157 /* invalidate all ways */
Russell King0eb948d2009-11-19 11:12:15 +0000158 spin_lock_irqsave(&l2x0_lock, flags);
Thomas Gleixner2fd86582010-07-31 21:05:24 +0530159 /* Invalidating when L2 is enabled is a nono */
160 BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
Catalin Marinas6775a552010-07-28 22:01:25 +0100161 writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
Catalin Marinas9a6655e2010-08-31 13:05:22 +0100162 cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
Catalin Marinas382266a2007-02-05 14:48:19 +0100163 cache_sync();
Russell King0eb948d2009-11-19 11:12:15 +0000164 spin_unlock_irqrestore(&l2x0_lock, flags);
Catalin Marinas382266a2007-02-05 14:48:19 +0100165}
166
/*
 * Invalidate the physical address range [start, end) in the outer cache.
 *
 * Partial lines at either boundary are flushed (clean+invalidate) rather
 * than invalidated, so unrelated dirty data sharing the line is not lost.
 * The lock is dropped and retaken after each 4K block to bound the time
 * interrupts stay disabled.
 */
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			/* let other waiters (e.g. IRQ paths) take the lock */
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
205
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700206static void l2x0_inv_range_atomic(unsigned long start, unsigned long end)
207{
208 unsigned long addr;
209
210 if (start & (CACHE_LINE_SIZE - 1)) {
211 start &= ~(CACHE_LINE_SIZE - 1);
212 writel_relaxed(start, l2x0_base + L2X0_CLEAN_INV_LINE_PA);
213 start += CACHE_LINE_SIZE;
214 }
215
216 if (end & (CACHE_LINE_SIZE - 1)) {
217 end &= ~(CACHE_LINE_SIZE - 1);
218 writel_relaxed(end, l2x0_base + L2X0_CLEAN_INV_LINE_PA);
219 }
220
221 for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
222 writel_relaxed(addr, l2x0_base + L2X0_INV_LINE_PA);
223
224 mb();
225}
226
/*
 * Clean (write back) the physical address range [start, end).
 * A range spanning the whole cache is cheaper to handle with a
 * clean-by-way of everything, so delegate to l2x0_clean_all() then.
 * The lock is dropped after each 4K block to bound IRQ-off latency.
 */
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			/* let other waiters take the lock between blocks */
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
256
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700257static void l2x0_clean_range_atomic(unsigned long start, unsigned long end)
258{
259 unsigned long addr;
260
261 start &= ~(CACHE_LINE_SIZE - 1);
262 for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
263 writel_relaxed(addr, l2x0_base + L2X0_CLEAN_LINE_PA);
264
265 mb();
266}
267
/*
 * Clean and invalidate the physical address range [start, end).
 * A range spanning the whole cache delegates to l2x0_flush_all().
 * Each 4K block is wrapped in debug_writel(0x03)/(0x00) for the
 * errata builds, and the lock is dropped between blocks to bound
 * IRQ-off latency.
 */
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			/* let other waiters take the lock between blocks */
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
299
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700300void l2x0_flush_range_atomic(unsigned long start, unsigned long end)
301{
302 unsigned long addr;
303
304 start &= ~(CACHE_LINE_SIZE - 1);
305 for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
306 writel_relaxed(addr, l2x0_base + L2X0_CLEAN_INV_LINE_PA);
307
308 mb();
309}
310
/*
 * outer_cache.disable hook: write back and invalidate everything, then
 * clear the enable bit.  dsb() is issued before dropping the lock so
 * the disable takes effect while maintenance is still serialised.
 */
static void l2x0_disable(void)
{
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
321
/*
 * l2x0_init - probe and enable an L2x0-family outer cache controller.
 * @base:     virtual address of the controller's register window
 * @aux_val:  bits to set in the auxiliary control register
 * @aux_mask: mask applied to the current AUX_CTRL before OR-ing @aux_val
 *
 * Identifies the part from CACHE_ID, derives the way count and total
 * size, invalidates the whole cache while the controller is disabled,
 * programs AUX_CTRL, re-enables the controller and installs the
 * outer_cache maintenance hooks appropriate for the part.
 */
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
	__u32 aux, bits;
	__u32 cache_id;
	__u32 way_size = 0;
	int ways;
	const char *type;

	l2x0_base = base;
	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	/* Disable the controller before reprogramming or invalidating it */
	bits = readl_relaxed(l2x0_base + L2X0_CTRL);
	bits &= ~0x01;	/* clear bit 0 (enable) */
	writel_relaxed(bits, l2x0_base + L2X0_CTRL);

	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		/* AUX_CTRL bit 16 selects 16-way associativity on L310 */
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
		break;
	case L2X0_CACHE_ID_PART_L210:
		/* L210 encodes the way count in AUX_CTRL bits [16:13] */
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}
	writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache Size =  Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + 3);	/* KB per way */
	l2x0_size = ways * way_size * SZ_1K;

	/* Safe: the enable bit was cleared above (l2x0_inv_all BUGs otherwise) */
	l2x0_inv_all();

	/* enable L2X0 */
	bits = readl_relaxed(l2x0_base + L2X0_CTRL);
	bits |= 0x01;	/* set bit 0 (enable) */
	writel_relaxed(bits, l2x0_base + L2X0_CTRL);

	/*
	 * L220/L310 use the locking range operations; everything else
	 * (including L210) gets the lock-free "atomic" variants.
	 */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L220:
		outer_cache.inv_range = l2x0_inv_range;
		outer_cache.clean_range = l2x0_clean_range;
		outer_cache.flush_range = l2x0_flush_range;
		printk(KERN_INFO "L220 cache controller enabled\n");
		break;
	case L2X0_CACHE_ID_PART_L310:
		outer_cache.inv_range = l2x0_inv_range;
		outer_cache.clean_range = l2x0_clean_range;
		outer_cache.flush_range = l2x0_flush_range;
		printk(KERN_INFO "L310 cache controller enabled\n");
		break;
	case L2X0_CACHE_ID_PART_L210:
	default:
		outer_cache.inv_range = l2x0_inv_range_atomic;
		outer_cache.clean_range = l2x0_clean_range_atomic;
		outer_cache.flush_range = l2x0_flush_range_atomic;
		printk(KERN_INFO "L210 cache controller enabled\n");
		break;
	}

	outer_cache.sync = l2x0_cache_sync;

	outer_cache.flush_all = l2x0_flush_all;
	outer_cache.inv_all = l2x0_inv_all;
	outer_cache.disable = l2x0_disable;
	outer_cache.set_debug = l2x0_set_debug;

	mb();
	/* NOTE(review): duplicates the per-part "cache controller enabled"
	 * printk emitted in the switch above — consider dropping one. */
	printk(KERN_INFO "%s cache controller enabled\n", type);
	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
			ways, cache_id, aux, l2x0_size);
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700412
/*
 * Prepare the L2 for suspend/power collapse: save the registers that
 * would be lost, write back all dirty data, then disable the controller.
 * Restored by l2x0_resume().
 */
void l2x0_suspend(void)
{
	/* Save aux control register value */
	aux_ctrl_save = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	data_latency_ctrl = readl_relaxed(l2x0_base + L2X0_DATA_LATENCY_CTRL);
	/* Flush all cache */
	l2x0_flush_all();
	/* Disable the cache */
	writel_relaxed(0, l2x0_base + L2X0_CTRL);

	/* Memory barrier */
	dmb();
}
426
/*
 * Re-enable the L2 after resume.
 * @collapsed: non-zero when the controller lost state during the power
 *             collapse and must be reprogrammed (AUX_CTRL, data latency)
 *             and invalidated before being enabled again.
 */
void l2x0_resume(int collapsed)
{
	if (collapsed) {
		/* Disable the cache (l2x0_inv_all requires it disabled) */
		writel_relaxed(0, l2x0_base + L2X0_CTRL);

		/* Restore aux control register value */
		writel_relaxed(aux_ctrl_save, l2x0_base + L2X0_AUX_CTRL);
		writel_relaxed(data_latency_ctrl, l2x0_base +
				L2X0_DATA_LATENCY_CTRL);

		/* Invalidate the cache */
		l2x0_inv_all();
	}

	/* Enable the cache */
	writel_relaxed(1, l2x0_base + L2X0_CTRL);

	mb();
}