/*
 * Cache control for MicroBlaze cache memories
 *
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */

#include <asm/cacheflush.h>
#include <linux/cache.h>
#include <asm/cpuinfo.h>
#include <asm/pvr.h>

static inline void __enable_icache_msr(void)
{
        __asm__ __volatile__ (" msrset  r0, %0;"        \
                        "nop;"                          \
                        : : "i" (MSR_ICE) : "memory");
}

static inline void __disable_icache_msr(void)
{
        __asm__ __volatile__ (" msrclr  r0, %0;"        \
                        "nop;"                          \
                        : : "i" (MSR_ICE) : "memory");
}

static inline void __enable_dcache_msr(void)
{
        __asm__ __volatile__ (" msrset  r0, %0;"        \
                        "nop;"                          \
                        : : "i" (MSR_DCE) : "memory");
}

static inline void __disable_dcache_msr(void)
{
        __asm__ __volatile__ (" msrclr  r0, %0;"        \
                        "nop;"                          \
                        : : "i" (MSR_DCE) : "memory");
}

static inline void __enable_icache_nomsr(void)
{
        __asm__ __volatile__ (" mfs     r12, rmsr;"     \
                        "nop;"                          \
                        "ori    r12, r12, %0;"          \
                        "mts    rmsr, r12;"             \
                        "nop;"                          \
                        : : "i" (MSR_ICE) : "memory", "r12");
}

static inline void __disable_icache_nomsr(void)
{
        __asm__ __volatile__ (" mfs     r12, rmsr;"     \
                        "nop;"                          \
                        "andi   r12, r12, ~%0;"         \
                        "mts    rmsr, r12;"             \
                        "nop;"                          \
                        : : "i" (MSR_ICE) : "memory", "r12");
}

static inline void __enable_dcache_nomsr(void)
{
        __asm__ __volatile__ (" mfs     r12, rmsr;"     \
                        "nop;"                          \
                        "ori    r12, r12, %0;"          \
                        "mts    rmsr, r12;"             \
                        "nop;"                          \
                        : : "i" (MSR_DCE) : "memory", "r12");
}

static inline void __disable_dcache_nomsr(void)
{
        __asm__ __volatile__ (" mfs     r12, rmsr;"     \
                        "nop;"                          \
                        "andi   r12, r12, ~%0;"         \
                        "mts    rmsr, r12;"             \
                        "nop;"                          \
                        : : "i" (MSR_DCE) : "memory", "r12");
}


/* Helper macro for computing the limits of cache range loops.
 *
 * The end address may be unaligned, which is fine for the C implementation;
 * the ASM implementation re-aligns it in the ASM macros below.
 */
#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)   \
do {                                                                    \
        int align = ~(cache_line_length - 1);                           \
        end = min(start + cache_size, end);                             \
        start &= align;                                                 \
} while (0)
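
/*
 * Worked example for the macro above (illustrative values only): with
 * start = 0x1234, end = 0x9234, a 32-byte line and an 8K cache, it clamps
 * end to min(0x1234 + 0x2000, 0x9234) = 0x3234 and rounds start down to the
 * line boundary 0x1220, so a range larger than the whole cache is never
 * walked line by line.
 */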

/*
 * Helper macro to loop over the whole cache (cache_size/line_length
 * iterations) and execute 'op' on each cacheline.
 */
#define CACHE_ALL_LOOP(cache_size, line_length, op)                     \
do {                                                                    \
        unsigned int len = cache_size - line_length;                    \
        int step = -line_length;                                        \
        WARN_ON(step >= 0);                                             \
                                                                        \
        __asm__ __volatile__ (" 1:      " #op " %0, r0;"                \
                        "bgtid  %0, 1b;"                                \
                        "addk   %0, %0, %1;"                            \
                        : : "r" (len), "r" (step)                       \
                        : "memory");                                    \
} while (0)
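
/*
 * Shape of the loop above, with illustrative numbers: for an 8K cache with
 * 32-byte lines it issues 'op' against r0 at offsets 0x1fe0, 0x1fc0, ...,
 * 0x20, 0x0, i.e. cache_size/line_length = 256 operations (one per line),
 * counting down until bgtid sees a non-positive index.
 */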

/* Used for wdc.flush/clear, which can take an offset in rB; plain wdc or wic
 * cannot.
 *
 * The start address is already cache-line aligned.
 * The end address may not be: if it is aligned, subtract one cacheline length
 * so the next (untouched) cacheline is not flushed/invalidated; if it is not,
 * align it down, because the whole line it falls into will be
 * flushed/invalidated anyway.
 */
#define CACHE_RANGE_LOOP_2(start, end, line_length, op)                 \
do {                                                                    \
        int step = -line_length;                                        \
        int align = ~(line_length - 1);                                 \
        int count;                                                      \
        end = ((end & align) == end) ? end - line_length : end & align; \
        count = end - start;                                            \
        WARN_ON(count < 0);                                             \
                                                                        \
        __asm__ __volatile__ (" 1:      " #op " %0, %1;"                \
                        "bgtid  %1, 1b;"                                \
                        "addk   %1, %1, %2;"                            \
                        : : "r" (start), "r" (count),                   \
                        "r" (step) : "memory");                         \
} while (0)
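
/*
 * Example of the walk above (made-up addresses): with 32-byte lines,
 * start = 0x1000 and end = 0x1080, end is aligned and is pulled back to
 * 0x1060, count becomes 0x60, and 'op' is issued at start + count for
 * counts 0x60, 0x40, 0x20 and 0x0, i.e. on lines 0x1060 down to 0x1000.
 */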

/* Only the first operand of 'op' is used (rB is r0) - for wic, wdc */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op)                 \
do {                                                                    \
        int volatile temp;                                              \
        int align = ~(line_length - 1);                                 \
        end = ((end & align) == end) ? end - line_length : end & align; \
        WARN_ON(end - start < 0);                                       \
                                                                        \
        __asm__ __volatile__ (" 1:      " #op " %1, r0;"                \
                        "cmpu   %0, %1, %2;"                            \
                        "bgtid  %0, 1b;"                                \
                        "addk   %1, %1, %3;"                            \
                        : : "r" (temp), "r" (start), "r" (end),         \
                        "r" (line_length) : "memory");                  \
} while (0)
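
/*
 * Note on the loop above: 'temp' is just a scratch register for the cmpu
 * result, so the walk continues while the current address is still below
 * the (already pulled-back) end and both the first and the last line of the
 * range get the operation. With the same made-up 0x1000-0x1080 range and
 * 32-byte lines, 'op' hits 0x1000, 0x1020, 0x1040 and 0x1060.
 */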

#define ASM_LOOP

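/*
 * ASM_LOOP (defined above) selects the hand-coded assembly loops; if it is
 * left undefined, the functions below fall back to the equivalent C
 * for-loops that issue one wic/wdc per cache line. Presumably kept as a
 * debug/fallback switch.
 *
 * Naming convention of the handlers: _msr/_nomsr says whether the cache is
 * switched off with msrset/msrclr or with an rmsr read-modify-write,
 * _irq/_noirq whether interrupts are disabled around the walk, and _wt/_wb
 * which dcache write policy the variant is intended for.
 */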
static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.icache_line_length, cpuinfo.icache_size);

        local_irq_save(flags);
        __disable_icache_msr();

#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
        for (i = start; i < end; i += cpuinfo.icache_line_length)
                __asm__ __volatile__ ("wic      %0, r0;"        \
                                : : "r" (i));
#endif
        __enable_icache_msr();
        local_irq_restore(flags);
}

static void __flush_icache_range_nomsr_irq(unsigned long start,
                                unsigned long end)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.icache_line_length, cpuinfo.icache_size);

        local_irq_save(flags);
        __disable_icache_nomsr();

#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
        for (i = start; i < end; i += cpuinfo.icache_line_length)
                __asm__ __volatile__ ("wic      %0, r0;"        \
                                : : "r" (i));
#endif

        __enable_icache_nomsr();
        local_irq_restore(flags);
}

static void __flush_icache_range_noirq(unsigned long start,
                                unsigned long end)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.icache_line_length, cpuinfo.icache_size);
#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
        for (i = start; i < end; i += cpuinfo.icache_line_length)
                __asm__ __volatile__ ("wic      %0, r0;"        \
                                : : "r" (i));
#endif
}

static void __flush_icache_all_msr_irq(void)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);

        local_irq_save(flags);
        __disable_icache_msr();
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
        for (i = 0; i < cpuinfo.icache_size;
                        i += cpuinfo.icache_line_length)
                __asm__ __volatile__ ("wic      %0, r0;" \
                                : : "r" (i));
#endif
        __enable_icache_msr();
        local_irq_restore(flags);
}

static void __flush_icache_all_nomsr_irq(void)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);

        local_irq_save(flags);
        __disable_icache_nomsr();
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
        for (i = 0; i < cpuinfo.icache_size;
                        i += cpuinfo.icache_line_length)
                __asm__ __volatile__ ("wic      %0, r0;" \
                                : : "r" (i));
#endif
        __enable_icache_nomsr();
        local_irq_restore(flags);
}

static void __flush_icache_all_noirq(void)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
        for (i = 0; i < cpuinfo.icache_size;
                        i += cpuinfo.icache_line_length)
                __asm__ __volatile__ ("wic      %0, r0;" \
                                : : "r" (i));
#endif
}

static void __invalidate_dcache_all_msr_irq(void)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);

        local_irq_save(flags);
        __disable_dcache_msr();
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
        for (i = 0; i < cpuinfo.dcache_size;
                        i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc      %0, r0;" \
                                : : "r" (i));
#endif
        __enable_dcache_msr();
        local_irq_restore(flags);
}

static void __invalidate_dcache_all_nomsr_irq(void)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);

        local_irq_save(flags);
        __disable_dcache_nomsr();
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
        for (i = 0; i < cpuinfo.dcache_size;
                        i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc      %0, r0;" \
                                : : "r" (i));
#endif
        __enable_dcache_nomsr();
        local_irq_restore(flags);
}

static void __invalidate_dcache_all_noirq_wt(void)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
        for (i = 0; i < cpuinfo.dcache_size;
                        i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc      %0, r0;" \
                                : : "r" (i));
#endif
}

/*
 * FIXME This is a blind invalidation, as expected,
 * but it can't be called on noMMU in microblaze_cache_init below.
 *
 * MS: the noMMU kernel won't boot if a simple wdc is used;
 * the likely reason is that data the kernel still needs gets discarded.
 */
static void __invalidate_dcache_all_wb(void)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
                                        wdc);
#else
        for (i = 0; i < cpuinfo.dcache_size;
                        i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc      %0, r0;" \
                                : : "r" (i));
#endif
}

static void __invalidate_dcache_range_wb(unsigned long start,
                                                unsigned long end)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
#else
        for (i = start; i < end; i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc.clear        %0, r0;"        \
                                : : "r" (i));
#endif
}

static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
                                                unsigned long end)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int)end);
        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.dcache_line_length, cpuinfo.dcache_size);

#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
        for (i = start; i < end; i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc      %0, r0;"        \
                                : : "r" (i));
#endif
}

static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
                                                unsigned long end)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int)end);
        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.dcache_line_length, cpuinfo.dcache_size);

        local_irq_save(flags);
        __disable_dcache_msr();

#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
        for (i = start; i < end; i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc      %0, r0;"        \
                                : : "r" (i));
#endif

        __enable_dcache_msr();
        local_irq_restore(flags);
}

static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
                                                unsigned long end)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.dcache_line_length, cpuinfo.dcache_size);

        local_irq_save(flags);
        __disable_dcache_nomsr();

#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
        for (i = start; i < end; i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc      %0, r0;"        \
                                : : "r" (i));
#endif

        __enable_dcache_nomsr();
        local_irq_restore(flags);
}

static void __flush_dcache_all_wb(void)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
                                wdc.flush);
#else
        for (i = 0; i < cpuinfo.dcache_size;
                        i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc.flush        %0, r0;" \
                                : : "r" (i));
#endif
}

static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
#else
        for (i = start; i < end; i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc.flush        %0, r0;"        \
                                : : "r" (i));
#endif
}

/* Operation table, shared by the wb and wt cache configurations */
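/*
 * The chosen table is presumably dispatched through the wrappers declared in
 * asm/cacheflush.h (e.g. the enable_dcache()/invalidate_icache() calls in
 * microblaze_cache_init() below), so every cache operation ends up in one of
 * the handlers above.
 */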
struct scache *mbc;

/* new wb cache model */
static const struct scache wb_msr = {
        .ie = __enable_icache_msr,
        .id = __disable_icache_msr,
        .ifl = __flush_icache_all_noirq,
        .iflr = __flush_icache_range_noirq,
        .iin = __flush_icache_all_noirq,
        .iinr = __flush_icache_range_noirq,
        .de = __enable_dcache_msr,
        .dd = __disable_dcache_msr,
        .dfl = __flush_dcache_all_wb,
        .dflr = __flush_dcache_range_wb,
        .din = __invalidate_dcache_all_wb,
        .dinr = __invalidate_dcache_range_wb,
};

/* The only difference from wb_msr is in the ie, id, de, dd functions */
static const struct scache wb_nomsr = {
        .ie = __enable_icache_nomsr,
        .id = __disable_icache_nomsr,
        .ifl = __flush_icache_all_noirq,
        .iflr = __flush_icache_range_noirq,
        .iin = __flush_icache_all_noirq,
        .iinr = __flush_icache_range_noirq,
        .de = __enable_dcache_nomsr,
        .dd = __disable_dcache_nomsr,
        .dfl = __flush_dcache_all_wb,
        .dflr = __flush_dcache_range_wb,
        .din = __invalidate_dcache_all_wb,
        .dinr = __invalidate_dcache_range_wb,
};

/* Old wt cache model which disables irqs and turns the cache off */
static const struct scache wt_msr = {
        .ie = __enable_icache_msr,
        .id = __disable_icache_msr,
        .ifl = __flush_icache_all_msr_irq,
        .iflr = __flush_icache_range_msr_irq,
        .iin = __flush_icache_all_msr_irq,
        .iinr = __flush_icache_range_msr_irq,
        .de = __enable_dcache_msr,
        .dd = __disable_dcache_msr,
        .dfl = __invalidate_dcache_all_msr_irq,
        .dflr = __invalidate_dcache_range_msr_irq_wt,
        .din = __invalidate_dcache_all_msr_irq,
        .dinr = __invalidate_dcache_range_msr_irq_wt,
};

static const struct scache wt_nomsr = {
        .ie = __enable_icache_nomsr,
        .id = __disable_icache_nomsr,
        .ifl = __flush_icache_all_nomsr_irq,
        .iflr = __flush_icache_range_nomsr_irq,
        .iin = __flush_icache_all_nomsr_irq,
        .iinr = __flush_icache_range_nomsr_irq,
        .de = __enable_dcache_nomsr,
        .dd = __disable_dcache_nomsr,
        .dfl = __invalidate_dcache_all_nomsr_irq,
        .dflr = __invalidate_dcache_range_nomsr_irq,
        .din = __invalidate_dcache_all_nomsr_irq,
        .dinr = __invalidate_dcache_range_nomsr_irq,
};

/* New wt cache model for newer Microblaze versions */
static const struct scache wt_msr_noirq = {
        .ie = __enable_icache_msr,
        .id = __disable_icache_msr,
        .ifl = __flush_icache_all_noirq,
        .iflr = __flush_icache_range_noirq,
        .iin = __flush_icache_all_noirq,
        .iinr = __flush_icache_range_noirq,
        .de = __enable_dcache_msr,
        .dd = __disable_dcache_msr,
        .dfl = __invalidate_dcache_all_noirq_wt,
        .dflr = __invalidate_dcache_range_nomsr_wt,
        .din = __invalidate_dcache_all_noirq_wt,
        .dinr = __invalidate_dcache_range_nomsr_wt,
};

static const struct scache wt_nomsr_noirq = {
        .ie = __enable_icache_nomsr,
        .id = __disable_icache_nomsr,
        .ifl = __flush_icache_all_noirq,
        .iflr = __flush_icache_range_noirq,
        .iin = __flush_icache_all_noirq,
        .iinr = __flush_icache_range_noirq,
        .de = __enable_dcache_nomsr,
        .dd = __disable_dcache_nomsr,
        .dfl = __invalidate_dcache_all_noirq_wt,
        .dflr = __invalidate_dcache_range_nomsr_wt,
        .din = __invalidate_dcache_all_noirq_wt,
        .dinr = __invalidate_dcache_range_nomsr_wt,
};

/* CPU version codes - see arch/microblaze/kernel/cpu/cpuinfo.c */
#define CPUVER_7_20_A 0x0c
#define CPUVER_7_20_D 0x0f
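
/*
 * Selection below in short: CPUs with the MSR instructions get a *_msr
 * table, others a *_nomsr one; a write-back dcache picks wb_*, write-through
 * picks wt_*; and for write-through, core versions 7.20.a and newer can use
 * the noirq variants while older cores disable the cache and irqs around
 * every operation.
 */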
void microblaze_cache_init(void)
{
        if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
                if (cpuinfo.dcache_wb) {
                        pr_info("wb_msr\n");
                        mbc = (struct scache *)&wb_msr;
                        if (cpuinfo.ver_code <= CPUVER_7_20_D) {
                                /* MS: problem with signal handling - hw bug */
                                pr_info("WB won't work properly\n");
                        }
                } else {
                        if (cpuinfo.ver_code >= CPUVER_7_20_A) {
                                pr_info("wt_msr_noirq\n");
                                mbc = (struct scache *)&wt_msr_noirq;
                        } else {
                                pr_info("wt_msr\n");
                                mbc = (struct scache *)&wt_msr;
                        }
                }
        } else {
                if (cpuinfo.dcache_wb) {
                        pr_info("wb_nomsr\n");
                        mbc = (struct scache *)&wb_nomsr;
                        if (cpuinfo.ver_code <= CPUVER_7_20_D) {
                                /* MS: problem with signal handling - hw bug */
                                pr_info("WB won't work properly\n");
                        }
                } else {
                        if (cpuinfo.ver_code >= CPUVER_7_20_A) {
                                pr_info("wt_nomsr_noirq\n");
                                mbc = (struct scache *)&wt_nomsr_noirq;
                        } else {
                                pr_info("wt_nomsr\n");
                                mbc = (struct scache *)&wt_nomsr;
                        }
                }
        }
        /*
         * FIXME Invalidation is done in U-Boot.
         * WT cache: data is already written to main memory.
         * WB cache: invalidation would discard dirty data, which kept the
         * noMMU kernel from booting.
         */
        /* invalidate_dcache(); */
        enable_dcache();

        invalidate_icache();
        enable_icache();
}