/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

/* On some machines (e.g. ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software.  We put a spinlock around all TLB flushes to
 * ensure this.
 */
DEFINE_SPINLOCK(pa_tlb_lock);
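
/* A sketch of the usage pattern assumed throughout this file: the
 * purge_tlb_start()/purge_tlb_end() helpers (see <asm/tlbflush.h>)
 * are expected to take pa_tlb_lock with interrupts disabled around
 * each pdtlb/pitlb sequence, e.g.:
 *
 *	unsigned long flags;
 *	purge_tlb_start(flags);	// roughly spin_lock_irqsave(&pa_tlb_lock, flags)
 *	pdtlb(addr);		// purge the data TLB entry for addr
 *	purge_tlb_end(flags);	// roughly spin_unlock_irqrestore(...)
 */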

struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __read_mostly;
#endif

#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
        on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
        on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
        flush_instruction_cache_local(NULL);
        flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

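/* The deferred-flush protocol: flush_dcache_page() below sets
 * PG_dcache_dirty instead of flushing when a page cache page has no
 * user mappings yet; update_mmu_cache() then performs the flush once
 * a translation for the page is actually installed.
 */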
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
        struct page *page = pte_page(*ptep);

        if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
            test_bit(PG_dcache_dirty, &page->flags)) {

                flush_kernel_dcache_page(page);
                clear_bit(PG_dcache_dirty, &page->flags);
        } else if (parisc_requires_coherency())
                flush_kernel_dcache_page(page);
}

void
show_cache_info(struct seq_file *m)
{
        char buf[32];

        seq_printf(m, "I-cache\t\t: %ld KB\n",
                cache_info.ic_size/1024 );
        if (cache_info.dc_loop != 1)
                snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
        seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
                cache_info.dc_size/1024,
                (cache_info.dc_conf.cc_wt ? "WT":"WB"),
                (cache_info.dc_conf.cc_sh ? ", shared I/D":""),
                ((cache_info.dc_loop == 1) ? "direct mapped" : buf));
        seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
                cache_info.it_size,
                cache_info.dt_size,
                cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
        );

#ifndef CONFIG_PA20
        /* BTLB - Block TLB */
        if (btlb_info.max_size==0) {
                seq_printf(m, "BTLB\t\t: not supported\n" );
        } else {
                seq_printf(m,
                "BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
                "BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
                "BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
                btlb_info.max_size, (int)4096,
                btlb_info.max_size>>8,
                btlb_info.fixed_range_info.num_i,
                btlb_info.fixed_range_info.num_d,
                btlb_info.fixed_range_info.num_comb,
                btlb_info.variable_range_info.num_i,
                btlb_info.variable_range_info.num_d,
                btlb_info.variable_range_info.num_comb
                );
        }
#endif
}

void __init
parisc_cache_init(void)
{
        if (pdc_cache_info(&cache_info) < 0)
                panic("parisc_cache_init: pdc_cache_info failed");

#if 0
        printk("ic_size %lx dc_size %lx it_size %lx\n",
                cache_info.ic_size,
                cache_info.dc_size,
                cache_info.it_size);

        printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
                cache_info.dc_base,
                cache_info.dc_stride,
                cache_info.dc_count,
                cache_info.dc_loop);

        printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
                *(unsigned long *) (&cache_info.dc_conf),
                cache_info.dc_conf.cc_alias,
                cache_info.dc_conf.cc_block,
                cache_info.dc_conf.cc_line,
                cache_info.dc_conf.cc_shift);
        printk(" wt %d sh %d cst %d hv %d\n",
                cache_info.dc_conf.cc_wt,
                cache_info.dc_conf.cc_sh,
                cache_info.dc_conf.cc_cst,
                cache_info.dc_conf.cc_hv);

        printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
                cache_info.ic_base,
                cache_info.ic_stride,
                cache_info.ic_count,
                cache_info.ic_loop);

        printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
                *(unsigned long *) (&cache_info.ic_conf),
                cache_info.ic_conf.cc_alias,
                cache_info.ic_conf.cc_block,
                cache_info.ic_conf.cc_line,
                cache_info.ic_conf.cc_shift);
        printk(" wt %d sh %d cst %d hv %d\n",
                cache_info.ic_conf.cc_wt,
                cache_info.ic_conf.cc_sh,
                cache_info.ic_conf.cc_cst,
                cache_info.ic_conf.cc_hv);

        printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
                cache_info.dt_conf.tc_sh,
                cache_info.dt_conf.tc_page,
                cache_info.dt_conf.tc_cst,
                cache_info.dt_conf.tc_aid,
                cache_info.dt_conf.tc_pad1);

        printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
                cache_info.it_conf.tc_sh,
                cache_info.it_conf.tc_page,
                cache_info.it_conf.tc_cst,
                cache_info.it_conf.tc_aid,
                cache_info.it_conf.tc_pad1);
#endif

        split_tlb = 0;
        if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
                if (cache_info.dt_conf.tc_sh == 2)
                        printk(KERN_WARNING "Unexpected TLB configuration. "
                        "Will flush I/D separately (could be optimized).\n");

                split_tlb = 1;
        }

        /* "New and Improved" version from Jim Hull
         *      (1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
         * The following CAFL_STRIDE is an optimized version, see
         * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
         * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
         */
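        /* Worked example (illustrative values, not from real PDC output):
         * with cc_line = 2, cc_block = 1 and cc_shift = 2, CAFL_STRIDE
         * below computes 2 << (3 + 1 + 2) = 128, i.e. the flush loops
         * advance 128 bytes per iteration.
         */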
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
        dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
        icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

#ifndef CONFIG_PA20
        if (pdc_btlb_info(&btlb_info) < 0) {
                memset(&btlb_info, 0, sizeof btlb_info);
        }
#endif

        if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
                                                PDC_MODEL_NVA_UNSUPPORTED) {
                printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
                panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
        }
}

void disable_sr_hashing(void)
{
        int srhash_type, retval;
        unsigned long space_bits;

        switch (boot_cpu_data.cpu_type) {
        case pcx:       /* We shouldn't get this far.  setup.c should prevent it. */
                BUG();
                return;

        case pcxs:
        case pcxt:
        case pcxt_:
                srhash_type = SRHASH_PCXST;
                break;

        case pcxl:
                srhash_type = SRHASH_PCXL;
                break;

        case pcxl2:     /* pcxl2 doesn't support space register hashing */
                return;

        default:        /* Currently all PA2.0 machines use the same ins. sequence */
                srhash_type = SRHASH_PA20;
                break;
        }

        disable_sr_hashing_asm(srhash_type);

        retval = pdc_spaceid_bits(&space_bits);
        /* If this procedure isn't implemented, don't panic. */
        if (retval < 0 && retval != PDC_BAD_OPTION)
                panic("pdc_spaceid_bits call failed.\n");
        if (space_bits != 0)
                panic("SpaceID hashing is still on!\n");
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
                   unsigned long physaddr)
{
        preempt_disable();
        flush_dcache_page_asm(physaddr, vmaddr);
        if (vma->vm_flags & VM_EXEC)
                flush_icache_page_asm(physaddr, vmaddr);
        preempt_enable();
}

void flush_dcache_page(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        struct vm_area_struct *mpnt;
        unsigned long offset;
        unsigned long addr, old_addr = 0;
        pgoff_t pgoff;

        if (mapping && !mapping_mapped(mapping)) {
                set_bit(PG_dcache_dirty, &page->flags);
                return;
        }

        flush_kernel_dcache_page(page);

        if (!mapping)
                return;

        pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

        /* We have carefully arranged in arch_get_unmapped_area() that
         * *any* mappings of a file are always congruently mapped (whether
         * declared as MAP_PRIVATE or MAP_SHARED), so we only need
         * to flush one address here for them all to become coherent */
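        /* "Congruently mapped" means all user addresses of the page
         * agree modulo SHMLBA and thus land in the same cache congruence
         * class, so normally only the first mapping in the loop below
         * triggers a cache flush; a second congruence class indicates an
         * inequivalent alias and is reported. */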

        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                addr = mpnt->vm_start + offset;

                /* The TLB is the engine of coherence on parisc: The
                 * CPU is entitled to speculate any page with a TLB
                 * mapping, so here we kill the mapping then flush the
                 * page along a special flush only alias mapping.
                 * This guarantees that the page is no-longer in the
                 * cache for any process and nor may it be
                 * speculatively read in (until the user or kernel
                 * specifically accesses it, of course) */

                flush_tlb_page(mpnt, addr);
                if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
                        __flush_cache_page(mpnt, addr, page_to_phys(page));
                        if (old_addr)
                                printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
                        old_addr = addr;
                }
        }
        flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;

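/* Measure whether flushing an address range or flushing the whole data
 * cache is cheaper, and set the crossover point.  Worked example with
 * illustrative numbers: if a full flush costs 100k cycles and flushing
 * the 2MB kernel text range costs 400k cycles, the threshold becomes
 * 2MB * 100k / 400k = 512KB; above that size a full flush is faster.
 */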
void __init parisc_setup_cache_timing(void)
{
        unsigned long rangetime, alltime;
        unsigned long size;

        alltime = mfctl(16);
        flush_data_cache();
        alltime = mfctl(16) - alltime;

        size = (unsigned long)(_end - _text);
        rangetime = mfctl(16);
        flush_kernel_dcache_range((unsigned long)_text, size);
        rangetime = mfctl(16) - rangetime;

        printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
                alltime, size, rangetime);

        /* Racy, but if we see an intermediate value, it's ok too... */
        parisc_cache_flush_threshold = size * alltime / rangetime;

        parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) &~ (L1_CACHE_BYTES - 1);
        if (!parisc_cache_flush_threshold)
                parisc_cache_flush_threshold = FLUSH_THRESHOLD;

        if (parisc_cache_flush_threshold > cache_info.dc_size)
                parisc_cache_flush_threshold = cache_info.dc_size;

        printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

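/* Flush a page through its kernel mapping, then purge the kernel TLB
 * entry: per the "TLB is the engine of coherence" rule above, dropping
 * the translation is what stops the CPU from speculatively pulling the
 * line back into the cache. */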
void flush_kernel_dcache_page_addr(void *addr)
{
        unsigned long flags;

        flush_kernel_dcache_page_asm(addr);
        purge_tlb_start(flags);
        pdtlb_kernel(addr);
        purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

void clear_user_page(void *vto, unsigned long vaddr, struct page *page)
{
        clear_page_asm(vto);
        if (!parisc_requires_coherency())
                flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
        struct page *pg)
{
        /* Copy using kernel mapping.  No coherency is needed
           (all in kmap/kunmap) on machines that don't support
           non-equivalent aliasing.  However, the `from' page
           needs to be flushed before it can be accessed through
           the kernel mapping. */
        preempt_disable();
        flush_dcache_page_asm(__pa(vfrom), vaddr);
        preempt_enable();
        copy_page_asm(vto, vfrom);
        if (!parisc_requires_coherency())
                flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(copy_user_page);

#ifdef CONFIG_PA8X00

void kunmap_parisc(void *addr)
{
        if (parisc_requires_coherency())
                flush_kernel_dcache_page_addr(addr);
}
EXPORT_SYMBOL(kunmap_parisc);
#endif

void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
        unsigned long flags;

        /* Note: purge_tlb_entries can be called at startup with
           no context.  */

        /* Disable preemption while we play with %sr1.  */
        preempt_disable();
        mtsp(mm->context, 1);
        purge_tlb_start(flags);
        pdtlb(addr);
        pitlb(addr);
        purge_tlb_end(flags);
        preempt_enable();
}
EXPORT_SYMBOL(purge_tlb_entries);

void __flush_tlb_range(unsigned long sid, unsigned long start,
                       unsigned long end)
{
        unsigned long npages;

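        /* Round the range out to whole pages.  For example, with 4KB
         * pages, start = 0x1234 and end = 0x5678 give
         * ((0x5678 - 0x1000) + 0xfff) >> 12 = 5 pages to purge. */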
        npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
                flush_tlb_all();
        else {
                unsigned long flags;

                mtsp(sid, 1);
                purge_tlb_start(flags);
                if (split_tlb) {
                        while (npages--) {
                                pdtlb(start);
                                pitlb(start);
                                start += PAGE_SIZE;
                        }
                } else {
                        while (npages--) {
                                pdtlb(start);
                                start += PAGE_SIZE;
                        }
                }
                purge_tlb_end(flags);
        }
}

static void cacheflush_h_tmp_function(void *dummy)
{
        flush_cache_all_local();
}

void flush_cache_all(void)
{
        on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        unsigned long usize = 0;

        for (vma = mm->mmap; vma; vma = vma->vm_next)
                usize += vma->vm_end - vma->vm_start;
        return usize;
}

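/* Walk the page table by hand (pgd -> pud -> pmd -> pte) and return a
 * pointer to the PTE for addr, or NULL if any level is empty.  The
 * callers below use this to flush only pages that are actually
 * mapped. */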
static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
        pte_t *ptep = NULL;

        if (!pgd_none(*pgd)) {
                pud_t *pud = pud_offset(pgd, addr);
                if (!pud_none(*pud)) {
                        pmd_t *pmd = pmd_offset(pud, addr);
                        if (!pmd_none(*pmd))
                                ptep = pte_offset_map(pmd, addr);
                }
        }
        return ptep;
}

void flush_cache_mm(struct mm_struct *mm)
{
        /* Flushing the whole cache on each cpu takes forever on
           rp3440, etc.  So, avoid it if the mm isn't too big.  */
        if (mm_total_size(mm) < parisc_cache_flush_threshold) {
                struct vm_area_struct *vma;

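                /* Space register %sr3 holds the space id of the current
                 * user address space, so this test asks whether mm is the
                 * one live on this CPU; if so, the user-virtual flush
                 * routines can be used directly. */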
                if (mm->context == mfsp(3)) {
                        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                flush_user_dcache_range_asm(vma->vm_start,
                                        vma->vm_end);
                                if (vma->vm_flags & VM_EXEC)
                                        flush_user_icache_range_asm(
                                          vma->vm_start, vma->vm_end);
                        }
                } else {
                        pgd_t *pgd = mm->pgd;

                        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                unsigned long addr;

                                for (addr = vma->vm_start; addr < vma->vm_end;
                                     addr += PAGE_SIZE) {
                                        pte_t *ptep = get_ptep(pgd, addr);
                                        if (ptep != NULL) {
                                                pte_t pte = *ptep;
                                                __flush_cache_page(vma, addr,
                                                  page_to_phys(pte_page(pte)));
                                        }
                                }
                        }
                }
                return;
        }

#ifdef CONFIG_SMP
        flush_cache_all();
#else
        flush_cache_all_local();
#endif
}

void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
        if ((end - start) < parisc_cache_flush_threshold)
                flush_user_dcache_range_asm(start,end);
        else
                flush_data_cache();
}

void
flush_user_icache_range(unsigned long start, unsigned long end)
{
        if ((end - start) < parisc_cache_flush_threshold)
                flush_user_icache_range_asm(start,end);
        else
                flush_instruction_cache();
}

void flush_cache_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end)
{
        BUG_ON(!vma->vm_mm->context);

        if ((end - start) < parisc_cache_flush_threshold) {
                if (vma->vm_mm->context == mfsp(3)) {
                        flush_user_dcache_range_asm(start, end);
                        if (vma->vm_flags & VM_EXEC)
                                flush_user_icache_range_asm(start, end);
                } else {
                        unsigned long addr;
                        pgd_t *pgd = vma->vm_mm->pgd;

                        for (addr = start & PAGE_MASK; addr < end;
                             addr += PAGE_SIZE) {
                                pte_t *ptep = get_ptep(pgd, addr);
                                if (ptep != NULL) {
                                        pte_t pte = *ptep;
                                        flush_cache_page(vma,
                                           addr, pte_pfn(pte));
                                }
                        }
                }
        } else {
#ifdef CONFIG_SMP
                flush_cache_all();
#else
                flush_cache_all_local();
#endif
        }
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
        BUG_ON(!vma->vm_mm->context);

        flush_tlb_page(vma, vmaddr);
        __flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));

}

#ifdef CONFIG_PARISC_TMPALIAS

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *vto;
        unsigned long flags;

        /* Clear using TMPALIAS region.  The page doesn't need to
           be flushed but the kernel mapping needs to be purged.  */

        vto = kmap_atomic(page, KM_USER0);

        /* The PA-RISC 2.0 Architecture book states on page F-6:
           "Before a write-capable translation is enabled, *all*
           non-equivalently-aliased translations must be removed
           from the page table and purged from the TLB.  (Note
           that the caches are not required to be flushed at this
           time.)  Before any non-equivalent aliased translation
           is re-enabled, the virtual address range for the writeable
           page (the entire page) must be flushed from the cache,
           and the write-capable translation removed from the page
           table and purged from the TLB."  */

        purge_kernel_dcache_page_asm((unsigned long)vto);
        purge_tlb_start(flags);
        pdtlb_kernel(vto);
        purge_tlb_end(flags);
        preempt_disable();
        clear_user_page_asm(vto, vaddr);
        preempt_enable();

        pagefault_enable();             /* kunmap_atomic(addr, KM_USER0); */
}

void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;
        unsigned long flags;

        /* Copy using TMPALIAS region.  This has the advantage
           that the `from' page doesn't need to be flushed.  However,
           the `to' page must be flushed in copy_user_page_asm since
           it can be used to bring in executable code.  */

        vfrom = kmap_atomic(from, KM_USER0);
        vto = kmap_atomic(to, KM_USER1);

        purge_kernel_dcache_page_asm((unsigned long)vto);
        purge_tlb_start(flags);
        pdtlb_kernel(vto);
        pdtlb_kernel(vfrom);
        purge_tlb_end(flags);
        preempt_disable();
        copy_user_page_asm(vto, vfrom, vaddr);
        flush_dcache_page_asm(__pa(vto), vaddr);
        preempt_enable();

        pagefault_enable();             /* kunmap_atomic(addr, KM_USER1); */
        pagefault_enable();             /* kunmap_atomic(addr, KM_USER0); */
}

#endif /* CONFIG_PARISC_TMPALIAS */