/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010  Paul Mundt
 *
 * P1/P2 Section mapping definitions from map32.h, which was:
 *
 *	Copyright 2003 (c) Lineo Solutions,Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/io.h>
#include <asm/mmu_context.h>

#define NR_PMB_ENTRIES	16

static void __pmb_unmap(struct pmb_entry *);

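/*
 * The PMB entry pool and its allocation bitmap: each of the 16
 * hardware slots has a matching software descriptor in
 * pmb_entry_list, and a set bit in pmb_map marks a slot as in use.
 */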
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static unsigned long pmb_map;

static inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

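/*
 * Grab a free slot from the allocation bitmap. The find/set pair is
 * not atomic as a whole, so if another path claims the slot between
 * the search and the test_and_set_bit(), the search is retried.
 */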
static int pmb_alloc_entry(void)
{
	unsigned int pos;

repeat:
	pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);

	/* find_first_zero_bit() returns NR_PMB_ENTRIES when the map is full */
	if (unlikely(pos >= NR_PMB_ENTRIES))
		return -ENOSPC;

	if (test_and_set_bit(pos, &pmb_map))
		goto repeat;

	return pos;
}

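/*
 * Allocate a software descriptor for a mapping. Passing PMB_NO_ENTRY
 * lets the allocator pick any free slot; otherwise the caller asks
 * for a specific hardware entry, as the legacy boot mapping code does.
 */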
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	int pos;

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (pos < 0)
			return ERR_PTR(pos);
	} else {
		if (test_bit(entry, &pmb_map))
			return ERR_PTR(-ENOSPC);
		pos = entry;
	}

	pmbe = &pmb_entry_list[pos];
	if (!pmbe)
		return ERR_PTR(-ENOMEM);

	pmbe->vpn = vpn;
	pmbe->ppn = ppn;
	pmbe->flags = flags;
	pmbe->entry = pos;

	return pmbe;
}

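/*
 * Clear a software descriptor and release its slot in the allocation
 * bitmap. The hardware entry itself is invalidated separately via
 * clear_pmb_entry().
 */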
static void pmb_free(struct pmb_entry *pmbe)
{
	int pos = pmbe->entry;

	pmbe->vpn = 0;
	pmbe->ppn = 0;
	pmbe->flags = 0;
	pmbe->entry = 0;

	clear_bit(pos, &pmb_map);
}

/*
 * Must be in P2 for __set_pmb_entry()
 */
static void __set_pmb_entry(unsigned long vpn, unsigned long ppn,
			    unsigned long flags, int pos)
{
	ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos));

#ifdef CONFIG_CACHE_WRITETHROUGH
	/*
	 * When we are in 32-bit address extended mode, CCR.CB becomes
	 * invalid, so care must be taken to manually adjust cacheable
	 * translations.
	 */
	if (likely(flags & PMB_C))
		flags |= PMB_WT;
#endif

	ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos));
}

static void __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
{
	jump_to_uncached();
	__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry);
	back_to_cached();
}

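/*
 * Invalidate a hardware entry by clearing the V bit in both the
 * address and data arrays. As with __set_pmb_entry(), the PMB
 * registers must be touched from the uncached P2 window.
 */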
static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned int entry = pmbe->entry;
	unsigned long addr;

	if (unlikely(entry >= NR_PMB_ENTRIES))
		return;

	jump_to_uncached();

	/* Clear V-bit */
	addr = mk_pmb_addr(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	addr = mk_pmb_data(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	back_to_cached();
}

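/*
 * Supported PMB mapping sizes, largest first so that pmb_remap()
 * can cover a region with the fewest possible entries.
 */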
static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = 0x20000000, .flag = PMB_SZ_512M, },
	{ .size = 0x08000000, .flag = PMB_SZ_128M, },
	{ .size = 0x04000000, .flag = PMB_SZ_64M, },
	{ .size = 0x01000000, .flag = PMB_SZ_16M, },
};

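/*
 * pmb_remap() covers [vaddr, vaddr + size) with however many entries
 * it takes, working from the largest size downwards, and returns the
 * number of bytes actually mapped (or a negative error). As a purely
 * illustrative example, a 64MB uncached window could be requested
 * with:
 *
 *	pmb_remap(0xb0000000, 0x18000000, 0x04000000, 0);
 */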
long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, unsigned long flags)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long wanted;
	int pmb_flags, i;
	long err;

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		if (flags & _PAGE_WT)
			pmb_flags = PMB_WT;
		else
			pmb_flags = PMB_C;
	} else
		pmb_flags = PMB_WT | PMB_UB;

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
				 PMB_NO_ENTRY);
		if (IS_ERR(pmbe)) {
			err = PTR_ERR(pmbe);
			goto out;
		}

		set_pmb_entry(pmbe);

		phys += pmb_sizes[i].size;
		vaddr += pmb_sizes[i].size;
		size -= pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp))
			pmbp->link = pmbe;

		pmbp = pmbe;

		/*
		 * Instead of trying smaller sizes on every iteration
		 * (even if we succeed in allocating space), try using
		 * pmb_sizes[i].size again.
		 */
		i--;
	}

	/* 0x1000000 == 16MB, the smallest PMB mapping size */
	if (size >= 0x1000000)
		goto again;

	return wanted - size;

out:
	if (pmbp)
		__pmb_unmap(pmbp);

	return err;
}

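/*
 * Tear down the mapping chain whose head entry covers the given
 * virtual address; entries linked together by pmb_remap() are
 * released as a group.
 */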
void pmb_unmap(unsigned long addr)
{
	struct pmb_entry *pmbe = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, &pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == addr)
				break;
			/* Not a match, keep looking */
			pmbe = NULL;
		}
	}

	if (unlikely(!pmbe))
		return;

	__pmb_unmap(pmbe);
}

257static void __pmb_unmap(struct pmb_entry *pmbe)
258{
Matt Flemingedd7de82009-10-06 21:22:29 +0000259 BUG_ON(!test_bit(pmbe->entry, &pmb_map));
Paul Mundtd7cdc9e2006-09-27 15:16:42 +0900260
261 do {
262 struct pmb_entry *pmblink = pmbe;
263
Matt Fleming067784f2009-10-06 21:22:23 +0000264 /*
265 * We may be called before this pmb_entry has been
266 * entered into the PMB table via set_pmb_entry(), but
267 * that's OK because we've allocated a unique slot for
268 * this entry in pmb_alloc() (even if we haven't filled
269 * it yet).
270 *
271 * Therefore, calling clear_pmb_entry() is safe as no
272 * other mapping can be using that slot.
273 */
274 clear_pmb_entry(pmbe);
Matt Flemingfc2bdef2009-10-06 21:22:22 +0000275
Paul Mundtd7cdc9e2006-09-27 15:16:42 +0900276 pmbe = pmblink->link;
277
278 pmb_free(pmblink);
279 } while (pmbe);
280}
281
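/*
 * If the boot loader has already populated the PMB (the "legacy"
 * boot scheme), read the live entries back from the hardware and
 * register matching software descriptors rather than constructing
 * fresh mappings.
 */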
#ifdef CONFIG_PMB_LEGACY
static int pmb_apply_legacy_mappings(void)
{
	int i;
	unsigned long addr, data;
	unsigned int applied = 0;

	for (i = 0; i < PMB_ENTRY_MAX; i++) {
		struct pmb_entry *pmbe;
		unsigned long vpn, ppn, flags;

		addr = PMB_DATA + (i << PMB_E_SHIFT);
		data = ctrl_inl(addr);
		if (!(data & PMB_V))
			continue;

		if (data & PMB_C) {
#if defined(CONFIG_CACHE_WRITETHROUGH)
			data |= PMB_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
			data &= ~PMB_WT;
#else
			data &= ~(PMB_C | PMB_WT);
#endif
		}
		ctrl_outl(data, addr);

		ppn = data & PMB_PFN_MASK;

		flags = data & (PMB_C | PMB_WT | PMB_UB);
		flags |= data & PMB_SZ_MASK;

		addr = PMB_ADDR + (i << PMB_E_SHIFT);
		data = ctrl_inl(addr);

		vpn = data & PMB_PFN_MASK;

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		WARN_ON(IS_ERR(pmbe));

		applied++;
	}

	return (applied == 0);
}
#else
static inline int pmb_apply_legacy_mappings(void)
{
	return 1;
}
#endif

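/*
 * pmb_init() runs from the uncached P2 window while it rewrites the
 * PMB: it first tries to adopt the legacy boot mappings, and failing
 * that establishes P1/P2 windows onto physical memory before enabling
 * 32-bit address extended mode and flushing the TLB.
 */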
int __uses_jump_to_uncached pmb_init(void)
{
	unsigned int i;
	unsigned long size, ret;

	jump_to_uncached();

	/*
	 * Attempt to apply the legacy boot mappings if configured. If
	 * this is successful then we simply carry on with those and
	 * don't bother establishing additional memory mappings. Dynamic
	 * device mappings through pmb_remap() can still be bolted on
	 * after this.
	 */
	ret = pmb_apply_legacy_mappings();
	if (ret == 0) {
		back_to_cached();
		return 0;
	}

	/*
	 * Insert PMB entries for the P1 and P2 areas so that, after
	 * we've switched the MMU to 32-bit mode, the semantics of P1
	 * and P2 are the same as in 29-bit mode, e.g.
	 *
	 *	P1 - provides a cached window onto physical memory
	 *	P2 - provides an uncached window onto physical memory
	 */
	size = (unsigned long)__MEMORY_START + __MEMORY_SIZE;

	ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C);
	BUG_ON(ret != size);

	ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB);
	BUG_ON(ret != size);

	ctrl_outl(0, PMB_IRMCR);

	/* PMB.SE and UB[7] */
	ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR);

	/* Flush out the TLB */
	i = ctrl_inl(MMUCR);
	i |= MMUCR_TI;
	ctrl_outl(i, MMUCR);

	back_to_cached();

	return 0;
}

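/*
 * debugfs interface: dump each hardware PMB slot in a one-line,
 * human-readable format.
 */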
static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = ctrl_inl(mk_pmb_addr(i));
		data = ctrl_inl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M) ? " 16MB" :
			 (size == PMB_SZ_64M) ? " 64MB" :
			 (size == PMB_SZ_128M) ? "128MB" :
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);

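/*
 * PMB state is not preserved across a hibernation image, so on the
 * resume transition every registered entry is rewritten back into
 * the hardware.
 */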
#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;
		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, &pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}
	}
	prev_state = state;
	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif
483#endif