blob: 21c3a92394de998dd17452ffa1f0f1ede0351ba2 [file] [log] [blame]
/*
 * Cache control for MicroBlaze cache memories
 *
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */
12
13#include <asm/cacheflush.h>
14#include <linux/cache.h>
15#include <asm/cpuinfo.h>
Michal Simek2ee2ff82009-12-10 11:43:57 +010016#include <asm/pvr.h>
Michal Simek8beb8502009-03-27 14:25:16 +010017
/*
 * MSR-variant cache on/off helpers: msrset/msrclr set or clear bits in
 * the Machine Status Register in one instruction (r0 as destination
 * discards the old MSR value).  The nop after each MSR write covers the
 * pipeline hazard following an MSR update.  These variants are only
 * usable when the CPU implements the MSR instructions
 * (PVR2_USE_MSR_INSTR — see microblaze_cache_init below).
 */
static inline void __enable_icache_msr(void)
{
	/* Set MSR_ICE: instruction cache enable */
	__asm__ __volatile__ ("	msrset	r0, %0;		\
				nop; "			\
			: : "i" (MSR_ICE) : "memory");
}

static inline void __disable_icache_msr(void)
{
	/* Clear MSR_ICE: instruction cache disable */
	__asm__ __volatile__ ("	msrclr	r0, %0;		\
				nop; "			\
			: : "i" (MSR_ICE) : "memory");
}

static inline void __enable_dcache_msr(void)
{
	/* Set MSR_DCE: data cache enable */
	__asm__ __volatile__ ("	msrset	r0, %0;		\
				nop; "			\
			:				\
			: "i" (MSR_DCE)			\
			: "memory");
}

static inline void __disable_dcache_msr(void)
{
	/* Clear MSR_DCE: data cache disable */
	__asm__ __volatile__ ("	msrclr	r0, %0;		\
				nop; "			\
			:				\
			: "i" (MSR_DCE)			\
			: "memory");
}

/*
 * no-MSR-instruction variants: read-modify-write the MSR through a
 * scratch register (r12, listed in the clobbers) using mfs/mts.  Not
 * atomic with respect to interrupts, hence the callers below that wrap
 * these in local_irq_save/restore.
 */
static inline void __enable_icache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "			\
			:				\
			: "i" (MSR_ICE)			\
			: "memory", "r12");
}

static inline void __disable_icache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				andi	r12, r12, ~%0;	\
				mts	rmsr, r12;	\
				nop; "			\
			:				\
			: "i" (MSR_ICE)			\
			: "memory", "r12");
}

static inline void __enable_dcache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "			\
			:				\
			: "i" (MSR_DCE)			\
			: "memory", "r12");
}

static inline void __disable_dcache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				andi	r12, r12, ~%0;	\
				mts	rmsr, r12;	\
				nop; "			\
			:				\
			: "i" (MSR_DCE)			\
			: "memory", "r12");
}
97
98
/* Helper macro for computing the limits of cache range loops
 *
 * Clamps 'end' so that at most one whole cache worth of lines is
 * processed, and aligns 'start' down to a cache-line boundary.
 * End address can be unaligned which is OK for C implementation.
 * ASM implementation align it in ASM macros
 *
 * Note: no trailing semicolon after while (0) — callers supply it, and
 * embedding one would expand to two statements, breaking use in an
 * unbraced if/else.
 */
#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)	\
do {									\
	int align = ~(cache_line_length - 1);				\
	end = min(start + cache_size, end);				\
	start &= align;							\
} while (0)
110
/*
 * Helper macro to loop over the specified cache_size/line_length and
 * execute 'op' on that cacheline
 *
 * The asm walks offsets downward from (cache_size - line_length) to 0:
 * bgtid branches while the counter is > 0 and its delay slot (addk)
 * adds the negative 'step', so 'op' is issued once per line including
 * offset 0.  WARN_ON(step >= 0) catches a zero/negative line length,
 * which would make the loop never terminate.
 *
 * NOTE(review): the semicolon after while (0) is part of the expansion
 * and some call sites in this file (the wdc invocations without their
 * own ';') rely on it — do not remove it without fixing those callers.
 */
#define CACHE_ALL_LOOP(cache_size, line_length, op)			\
do {									\
	unsigned int len = cache_size - line_length;			\
	int step = -line_length;					\
	WARN_ON(step >= 0);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%0, r0;		\
					bgtid	%0, 1b;		\
					addk	%0, %0, %1;	\
					" : : "r" (len), "r" (step)	\
					: "memory");			\
} while (0);
127
/* Used for wdc.flush/clear which can use rB for offset which is not possible
 * to use for simple wdc or wic.
 *
 * start address is cache aligned
 * end address is not aligned, if end is aligned then I have to substract
 * cacheline length because I can't flush/invalidate the next cacheline.
 * If is not, I align it because I will flush/invalidate whole line.
 *
 * The asm issues "op start, count" with count walking down to 0 in
 * -line_length steps (bgtid's delay slot performs the addk), so the
 * effective address covers [start, end] one line at a time.
 */
#define CACHE_RANGE_LOOP_2(start, end, line_length, op)			\
do {									\
	int step = -line_length;					\
	int align = ~(line_length - 1);					\
	int count;							\
	end = ((end & align) == end) ? end - line_length : end & align;	\
	count = end - start;						\
	WARN_ON(count < 0);	/* caller passed end < start */		\
									\
	__asm__ __volatile__ (" 1:	" #op "	%0, %1;		\
					bgtid	%1, 1b;		\
					addk	%1, %1, %2;	\
			" : : "r" (start), "r" (count),			\
			"r" (step) : "memory");				\
} while (0)
150
/* It is used only first parameter for OP - for wic, wdc
 *
 * Issues "op addr, r0" for every cache line from start to end
 * (inclusive after the same end-alignment adjustment as
 * CACHE_RANGE_LOOP_2); cmpu/bgtid form the loop test and the delay
 * slot advances addr by one line.
 *
 * FIXME(review): 'temp' and 'start' are modified by the asm but are
 * declared as plain inputs; they should be "=&r"/"+r" output operands
 * so the compiler knows their registers are clobbered.  Left as-is
 * pending testing on hardware.
 */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op)			\
do {									\
	int volatile temp;						\
	int align = ~(line_length - 1);					\
	end = ((end & align) == end) ? end - line_length : end & align;	\
	WARN_ON(end - start < 0);					\
									\
	__asm__ __volatile__ (" 1:	" #op "	%1, r0;		\
					cmpu	%0, %1, %2;	\
					bgtid	%0, 1b;		\
					addk	%1, %1, %3;	\
			" : : "r" (temp), "r" (start), "r" (end),	\
			"r" (line_length) : "memory");			\
} while (0)
166
/* When defined, use the hand-written asm loop macros above; comment it
 * out to fall back to the plain C per-line loops in the #else branches
 * of the functions below. */
#define ASM_LOOP
168
/*
 * Flush a range of the instruction cache with the icache turned off
 * (via msrclr) and interrupts disabled for the duration.
 */
static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int) end);

	/* Clamp range to one cache size, align start to a line */
	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_msr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_icache_msr();
	local_irq_restore(flags);
}

/*
 * As above, but toggles the icache through the mfs/mts fallback for
 * CPUs without the MSR instructions.
 */
static void __flush_icache_range_nomsr_irq(unsigned long start,
				unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_nomsr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif

	__enable_icache_nomsr();
	local_irq_restore(flags);
}

/*
 * Flush a range of the icache without disabling the cache or
 * interrupts — for newer cores where wic is safe while enabled.
 */
static void __flush_icache_range_noirq(unsigned long start,
				unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
}

/* Flush the whole icache; MSR-instruction variant, IRQs off. */
static void __flush_icache_all_msr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_msr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
			__asm__ __volatile__ ("wic	%0, r0;" \
					: : "r" (i));
#endif
	__enable_icache_msr();
	local_irq_restore(flags);
}

/* Flush the whole icache; mfs/mts variant, IRQs off. */
static void __flush_icache_all_nomsr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_nomsr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
			__asm__ __volatile__ ("wic	%0, r0;" \
					: : "r" (i));
#endif
	__enable_icache_nomsr();
	local_irq_restore(flags);
}

/* Flush the whole icache without touching MSR or interrupts. */
static void __flush_icache_all_noirq(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
			__asm__ __volatile__ ("wic	%0, r0;" \
					: : "r" (i));
#endif
}
302
/*
 * Invalidate the whole (write-through) dcache with the cache disabled
 * via MSR instructions and interrupts off.
 */
static void __invalidate_dcache_all_msr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_msr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc	%0, r0;" \
					: : "r" (i));
#endif
	__enable_dcache_msr();
	local_irq_restore(flags);
}

/* As above, using the mfs/mts fallback to disable the dcache. */
static void __invalidate_dcache_all_nomsr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_nomsr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc	%0, r0;" \
					: : "r" (i));
#endif
	__enable_dcache_nomsr();
	local_irq_restore(flags);
}

/*
 * Invalidate the whole write-through dcache without disabling it.
 * Note: the CACHE_ALL_LOOP invocation has no trailing ';' — it relies
 * on the semicolon embedded in that macro's expansion.
 */
static void __invalidate_dcache_all_noirq_wt(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc	%0, r0;" \
					: : "r" (i));
#endif
}

/* FIXME It is blindly invalidation as is expected
 * but can't be called on noMMU in microblaze_cache_init below
 *
 * MS: noMMU kernel won't boot if simple wdc is used
 * The reason should be that there are discared data which kernel needs
 */
static void __invalidate_dcache_all_wb(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	/* no ';' here either — supplied by CACHE_ALL_LOOP's expansion */
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
					wdc)
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc	%0, r0;" \
					: : "r" (i));
#endif
}
385
/*
 * Invalidate a range of a write-back dcache: wdc.clear discards the
 * line (no write-back) using rB as the offset operand.
 */
static void __invalidate_dcache_range_wb(unsigned long start,
						unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.clear	%0, r0;"	\
				: : "r" (i));
#endif
}

/*
 * Invalidate a range of a write-through dcache without disabling the
 * cache or interrupts (simple wdc is safe there: no dirty data).
 */
static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
						unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int) end);
	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif
}

/*
 * Invalidate a range of a write-through dcache with the cache disabled
 * via MSR instructions and interrupts off (older cores).
 */
static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
						unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int) end);
	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_msr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif

	__enable_dcache_msr();
	local_irq_restore(flags);
}

/* As above, using the mfs/mts fallback to disable the dcache. */
static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
						unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_nomsr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif

	__enable_dcache_nomsr();
	local_irq_restore(flags);
}
480
/*
 * Flush the whole write-back dcache: wdc.flush writes dirty lines back
 * to memory and invalidates them.
 */
static void __flush_dcache_all_wb(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
				wdc.flush);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc.flush	%0, r0;" \
					: : "r" (i));
#endif
}

/* Flush (write back + invalidate) a range of the write-back dcache. */
static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.flush	%0, r0;"	\
				: : "r" (i));
#endif
}
516
/* struct for wb caches and for wt caches
 * mbc points at whichever const table below matches the detected CPU;
 * microblaze_cache_init() selects it. */
struct scache *mbc;

/* new wb cache model */
const struct scache wb_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};

/* There is only difference in ie, id, de, dd functions */
const struct scache wb_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};

/* Old wt cache model with disabling irq and turn off cache */
const struct scache wt_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_msr_irq,
	.iflr = __flush_icache_range_msr_irq,
	.iin = __flush_icache_all_msr_irq,
	.iinr = __flush_icache_range_msr_irq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_msr_irq,
	.dflr = __invalidate_dcache_range_msr_irq_wt,
	.din = __invalidate_dcache_all_msr_irq,
	.dinr = __invalidate_dcache_range_msr_irq_wt,
};

/* As wt_msr but for CPUs without the MSR instructions */
const struct scache wt_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_nomsr_irq,
	.iflr = __flush_icache_range_nomsr_irq,
	.iin = __flush_icache_all_nomsr_irq,
	.iinr = __flush_icache_range_nomsr_irq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_nomsr_irq,
	.dflr = __invalidate_dcache_range_nomsr_irq,
	.din = __invalidate_dcache_all_nomsr_irq,
	.dinr = __invalidate_dcache_range_nomsr_irq,
};

/* New wt cache model for newer Microblaze versions */
const struct scache wt_msr_noirq = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};

/* As wt_msr_noirq but for CPUs without the MSR instructions */
const struct scache wt_nomsr_noirq = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};
613
/* CPU version code for 7.20.c - see arch/microblaze/kernel/cpu/cpuinfo.c */
#define CPUVER_7_20_A	0x0c
#define CPUVER_7_20_D	0x0f

/* No trailing ';' in the expansion: callers supply their own, and an
 * embedded one would break use in an unbraced if/else. */
#define INFO(s)	printk(KERN_INFO "cache: " s "\n")
Michal Simek2ee2ff82009-12-10 11:43:57 +0100619
/*
 * Select the cache-operation table (mbc) matching the detected CPU:
 * MSR-instruction support (PVR2_USE_MSR_INSTR) picks the msr/nomsr
 * helper set, dcache_wb picks write-back vs write-through ops, and the
 * version code selects the irq-disabling legacy WT model for cores
 * older than 7.20.a.  Finally enables both caches.
 */
void microblaze_cache_init(void)
{
	if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
		if (cpuinfo.dcache_wb) {
			INFO("wb_msr");
			mbc = (struct scache *)&wb_msr;
			if (cpuinfo.ver_code < CPUVER_7_20_D) {
				/* MS: problem with signal handling - hw bug */
				INFO("WB won't work properly");
			}
		} else {
			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
				INFO("wt_msr_noirq");
				mbc = (struct scache *)&wt_msr_noirq;
			} else {
				INFO("wt_msr");
				mbc = (struct scache *)&wt_msr;
			}
		}
	} else {
		if (cpuinfo.dcache_wb) {
			INFO("wb_nomsr");
			mbc = (struct scache *)&wb_nomsr;
			if (cpuinfo.ver_code < CPUVER_7_20_D) {
				/* MS: problem with signal handling - hw bug */
				INFO("WB won't work properly");
			}
		} else {
			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
				INFO("wt_nomsr_noirq");
				mbc = (struct scache *)&wt_nomsr_noirq;
			} else {
				INFO("wt_nomsr");
				mbc = (struct scache *)&wt_nomsr;
			}
		}
	}
/* FIXME Invalidation is done in U-BOOT
 * WT cache: Data is already written to main memory
 * WB cache: Discard data on noMMU which caused that kernel doesn't boot
 */
	/* invalidate_dcache(); */
	enable_dcache();

	invalidate_icache();
	enable_icache();
}