/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 * Copyright (c) 2009, 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static uint32_t aux_ctrl_save;
static DEFINE_SPINLOCK(l2x0_lock);
static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
static uint32_t l2x0_size;

static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
        /* wait for cache operation by line or way to complete */
        while (readl_relaxed(reg) & mask)
                ;
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
        /* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif

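/*
 * Issue a cache sync operation and wait for it to complete.  When
 * CONFIG_ARM_ERRATA_753970 is set, the sync is triggered by writing to a
 * dummy register instead of L2X0_CACHE_SYNC to work around the PL310
 * erratum.
 */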
static inline void cache_sync(void)
{
        void __iomem *base = l2x0_base;

#ifdef CONFIG_ARM_ERRATA_753970
        /* write to an unmapped register */
        writel_relaxed(0, base + L2X0_DUMMY_REG);
#else
        writel_relaxed(0, base + L2X0_CACHE_SYNC);
#endif
        cache_wait(base + L2X0_CACHE_SYNC, 1);
}

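/*
 * Single-line maintenance by physical address: wait for any previous
 * background operation on the register to finish, then post the new
 * address.  On PL310 (CONFIG_CACHE_PL310) the wait is a no-op because
 * line operations are atomic.
 */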
static inline void l2x0_clean_line(unsigned long addr)
{
        void __iomem *base = l2x0_base;
        cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
        writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
        void __iomem *base = l2x0_base;
        cache_wait(base + L2X0_INV_LINE_PA, 1);
        writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)

#define debug_writel(val)	outer_cache.set_debug(val)

static void l2x0_set_debug(unsigned long val)
{
        writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define l2x0_set_debug	NULL
#endif

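/*
 * Flush (clean and invalidate) a single line by physical address.  With
 * CONFIG_PL310_ERRATA_588369 the combined clean+invalidate operation is
 * replaced by a separate clean followed by an invalidate.
 */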
#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
        void __iomem *base = l2x0_base;

        /* Clean by PA followed by Invalidate by PA */
        cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
        writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
        cache_wait(base + L2X0_INV_LINE_PA, 1);
        writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
        void __iomem *base = l2x0_base;
        cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
        writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

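/* Sync entry point installed as outer_cache.sync in l2x0_init() */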
void l2x0_cache_sync(void)
{
        cache_sync();
}

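/* Clean and invalidate all ways; the caller must hold l2x0_lock */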
static void __l2x0_flush_all(void)
{
        debug_writel(0x03);
        writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
        cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
        cache_sync();
        debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
        unsigned long flags;

        /* clean and invalidate all ways */
        spin_lock_irqsave(&l2x0_lock, flags);
        __l2x0_flush_all();
        spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
        unsigned long flags;

        /* clean all ways */
        spin_lock_irqsave(&l2x0_lock, flags);
        writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
        cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
        cache_sync();
        spin_unlock_irqrestore(&l2x0_lock, flags);
}

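/*
 * Invalidate all ways.  Only legal while the controller is disabled;
 * invalidating a live L2 would discard dirty lines.
 */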
static void l2x0_inv_all(void)
{
        unsigned long flags;

        /* invalidate all ways */
        spin_lock_irqsave(&l2x0_lock, flags);
        /* Invalidating while the L2 is enabled is not allowed */
        BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
        writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
        cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
        cache_sync();
        spin_unlock_irqrestore(&l2x0_lock, flags);
}

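/*
 * Invalidate a physical address range.  Partial lines at either end are
 * flushed (clean + invalidate) rather than invalidated so that data
 * sharing the line but outside the range is not lost.  The lock is
 * dropped and re-taken every 4K to bound interrupt latency.
 */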
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
        void __iomem *base = l2x0_base;
        unsigned long flags;

        spin_lock_irqsave(&l2x0_lock, flags);
        if (start & (CACHE_LINE_SIZE - 1)) {
                start &= ~(CACHE_LINE_SIZE - 1);
                debug_writel(0x03);
                l2x0_flush_line(start);
                debug_writel(0x00);
                start += CACHE_LINE_SIZE;
        }

        if (end & (CACHE_LINE_SIZE - 1)) {
                end &= ~(CACHE_LINE_SIZE - 1);
                debug_writel(0x03);
                l2x0_flush_line(end);
                debug_writel(0x00);
        }

        while (start < end) {
                unsigned long blk_end = start + min(end - start, 4096UL);

                while (start < blk_end) {
                        l2x0_inv_line(start);
                        start += CACHE_LINE_SIZE;
                }

                if (blk_end < end) {
                        spin_unlock_irqrestore(&l2x0_lock, flags);
                        spin_lock_irqsave(&l2x0_lock, flags);
                }
        }
        cache_wait(base + L2X0_INV_LINE_PA, 1);
        cache_sync();
        spin_unlock_irqrestore(&l2x0_lock, flags);
}

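/*
 * Lock-free variant installed for the L210/default case in l2x0_init().
 * Partial lines at the range boundaries are cleaned and invalidated;
 * whole lines are invalidated, followed by a memory barrier.
 */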
static void l2x0_inv_range_atomic(unsigned long start, unsigned long end)
{
        unsigned long addr;

        if (start & (CACHE_LINE_SIZE - 1)) {
                start &= ~(CACHE_LINE_SIZE - 1);
                writel_relaxed(start, l2x0_base + L2X0_CLEAN_INV_LINE_PA);
                start += CACHE_LINE_SIZE;
        }

        if (end & (CACHE_LINE_SIZE - 1)) {
                end &= ~(CACHE_LINE_SIZE - 1);
                writel_relaxed(end, l2x0_base + L2X0_CLEAN_INV_LINE_PA);
        }

        for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
                writel_relaxed(addr, l2x0_base + L2X0_INV_LINE_PA);

        mb();
}

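/*
 * Clean a physical address range.  Ranges covering the whole cache fall
 * back to a clean of all ways, which is cheaper than walking the range
 * line by line.
 */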
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
        void __iomem *base = l2x0_base;
        unsigned long flags;

        if ((end - start) >= l2x0_size) {
                l2x0_clean_all();
                return;
        }

        spin_lock_irqsave(&l2x0_lock, flags);
        start &= ~(CACHE_LINE_SIZE - 1);
        while (start < end) {
                unsigned long blk_end = start + min(end - start, 4096UL);

                while (start < blk_end) {
                        l2x0_clean_line(start);
                        start += CACHE_LINE_SIZE;
                }

                if (blk_end < end) {
                        spin_unlock_irqrestore(&l2x0_lock, flags);
                        spin_lock_irqsave(&l2x0_lock, flags);
                }
        }
        cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
        cache_sync();
        spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range_atomic(unsigned long start, unsigned long end)
{
        unsigned long addr;

        start &= ~(CACHE_LINE_SIZE - 1);
        for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
                writel_relaxed(addr, l2x0_base + L2X0_CLEAN_LINE_PA);

        mb();
}

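/*
 * Clean and invalidate a physical address range.  As with cleaning,
 * ranges covering the whole cache are handled by a flush of all ways,
 * and the lock is dropped every 4K to bound interrupt latency.
 */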
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
        void __iomem *base = l2x0_base;
        unsigned long flags;

        if ((end - start) >= l2x0_size) {
                l2x0_flush_all();
                return;
        }

        spin_lock_irqsave(&l2x0_lock, flags);
        start &= ~(CACHE_LINE_SIZE - 1);
        while (start < end) {
                unsigned long blk_end = start + min(end - start, 4096UL);

                debug_writel(0x03);
                while (start < blk_end) {
                        l2x0_flush_line(start);
                        start += CACHE_LINE_SIZE;
                }
                debug_writel(0x00);

                if (blk_end < end) {
                        spin_unlock_irqrestore(&l2x0_lock, flags);
                        spin_lock_irqsave(&l2x0_lock, flags);
                }
        }
        cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
        cache_sync();
        spin_unlock_irqrestore(&l2x0_lock, flags);
}

void l2x0_flush_range_atomic(unsigned long start, unsigned long end)
{
        unsigned long addr;

        start &= ~(CACHE_LINE_SIZE - 1);
        for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
                writel_relaxed(addr, l2x0_base + L2X0_CLEAN_INV_LINE_PA);

        mb();
}

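/* Flush everything, then turn the controller off */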
static void l2x0_disable(void)
{
        unsigned long flags;

        spin_lock_irqsave(&l2x0_lock, flags);
        __l2x0_flush_all();
        writel_relaxed(0, l2x0_base + L2X0_CTRL);
        dsb();
        spin_unlock_irqrestore(&l2x0_lock, flags);
}

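/*
 * l2x0_init - probe and enable an L2x0/PL310 outer cache controller
 * @base:	virtual base address of the controller registers
 * @aux_val:	bits to set in the auxiliary control register
 * @aux_mask:	mask applied to the auxiliary control register before
 *		@aux_val is OR-ed in
 *
 * Disables the controller, programs the auxiliary control register,
 * computes the way mask and cache size, invalidates all ways, re-enables
 * the controller and installs the outer_cache callbacks.
 */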
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
        __u32 aux, bits;
        __u32 cache_id;
        __u32 way_size = 0;
        int ways;
        const char *type;

        l2x0_base = base;
        cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

        bits = readl_relaxed(l2x0_base + L2X0_CTRL);
        bits &= ~0x01;	/* clear bit 0 */
        writel_relaxed(bits, l2x0_base + L2X0_CTRL);

        aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

        aux &= aux_mask;
        aux |= aux_val;

        /* Determine the number of ways */
        switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
        case L2X0_CACHE_ID_PART_L310:
                if (aux & (1 << 16))
                        ways = 16;
                else
                        ways = 8;
                type = "L310";
                break;
        case L2X0_CACHE_ID_PART_L210:
                ways = (aux >> 13) & 0xf;
                type = "L210";
                break;
        default:
                /* Assume unknown chips have 8 ways */
                ways = 8;
                type = "L2x0 series";
                break;
        }
        writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
        l2x0_way_mask = (1 << ways) - 1;

        /*
         * L2 cache Size = Way size * Number of ways
         */
        way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
        way_size = 1 << (way_size + 3);
        l2x0_size = ways * way_size * SZ_1K;

        l2x0_inv_all();

        /* enable L2X0 */
        bits = readl_relaxed(l2x0_base + L2X0_CTRL);
        bits |= 0x01;	/* set bit 0 */
        writel_relaxed(bits, l2x0_base + L2X0_CTRL);

        switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
        case L2X0_CACHE_ID_PART_L220:
                outer_cache.inv_range = l2x0_inv_range;
                outer_cache.clean_range = l2x0_clean_range;
                outer_cache.flush_range = l2x0_flush_range;
                printk(KERN_INFO "L220 cache controller enabled\n");
                break;
        case L2X0_CACHE_ID_PART_L310:
                outer_cache.inv_range = l2x0_inv_range;
                outer_cache.clean_range = l2x0_clean_range;
                outer_cache.flush_range = l2x0_flush_range;
                printk(KERN_INFO "L310 cache controller enabled\n");
                break;
        case L2X0_CACHE_ID_PART_L210:
        default:
                outer_cache.inv_range = l2x0_inv_range_atomic;
                outer_cache.clean_range = l2x0_clean_range_atomic;
                outer_cache.flush_range = l2x0_flush_range_atomic;
                printk(KERN_INFO "L210 cache controller enabled\n");
                break;
        }

        outer_cache.sync = l2x0_cache_sync;

        outer_cache.flush_all = l2x0_flush_all;
        outer_cache.inv_all = l2x0_inv_all;
        outer_cache.disable = l2x0_disable;
        outer_cache.set_debug = l2x0_set_debug;

        mb();
        printk(KERN_INFO "%s cache controller enabled\n", type);
        printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
                        ways, cache_id, aux, l2x0_size);
}

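/*
 * Save the auxiliary control register value, flush the whole L2 and
 * disable the controller; l2x0_resume() undoes this.
 */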
void l2x0_suspend(void)
{
        /* Save aux control register value */
        aux_ctrl_save = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
        /* Flush all cache */
        l2x0_flush_all();
        /* Disable the cache */
        writel_relaxed(0, l2x0_base + L2X0_CTRL);

        /* Memory barrier */
        dmb();
}

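/*
 * Re-enable the L2.  If the controller state was lost (@collapsed),
 * restore the saved auxiliary control register and invalidate all ways
 * before turning it back on.
 */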
void l2x0_resume(int collapsed)
{
        if (collapsed) {
                /* Disable the cache */
                writel_relaxed(0, l2x0_base + L2X0_CTRL);

                /* Restore aux control register value */
                writel_relaxed(aux_ctrl_save, l2x0_base + L2X0_AUX_CTRL);

                /* Invalidate the cache */
                l2x0_inv_all();
        }

        /* Enable the cache */
        writel_relaxed(1, l2x0_base + L2X0_CTRL);

        mb();
}