/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rwlock.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry;

struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;
	unsigned long size;

	spinlock_t lock;

	/*
	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;

	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};

static void pmb_unmap_entry(struct pmb_entry *, int depth);

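/*
 * Software shadow of the hardware PMB state: pmb_entry_list mirrors the
 * hardware entries, pmb_map tracks which slots are in use, and pmb_rwlock
 * serialises updates to both.
 */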
static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

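/*
 * Find and reserve the first free slot in pmb_map. Returns the slot index,
 * or -ENOSPC if all entries are in use. Called with pmb_rwlock held for
 * writing.
 */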
static int pmb_alloc_entry(void)
{
	int pos;

	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);
	else
		pos = -ENOSPC;

	return pos;
}

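/*
 * Allocate a software PMB entry for the given vpn/ppn/flags triplet. If
 * @entry is PMB_NO_ENTRY any free slot is used, otherwise the specified
 * hardware slot is claimed. Returns an ERR_PTR() on failure; the hardware
 * entry itself is not written until __set_pmb_entry().
 */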
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	unsigned long irqflags;
	void *ret = NULL;
	int pos;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {
			ret = ERR_PTR(pos);
			goto out;
		}
	} else {
		if (__test_and_set_bit(entry, pmb_map)) {
			ret = ERR_PTR(-ENOSPC);
			goto out;
		}

		pos = entry;
	}

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

	pmbe = &pmb_entry_list[pos];

	memset(pmbe, 0, sizeof(struct pmb_entry));

	spin_lock_init(&pmbe->lock);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;
	pmbe->entry	= pos;

	return pmbe;

out:
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
	return ret;
}

static void pmb_free(struct pmb_entry *pmbe)
{
	__clear_bit(pmbe->entry, pmb_map);

	pmbe->entry	= PMB_NO_ENTRY;
	pmbe->link	= NULL;
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
	unsigned long flags = 0;

#if defined(CONFIG_CACHE_WRITETHROUGH)
	flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
	flags |= PMB_C;
#endif

	return flags;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
	writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
	writel_uncached(pmbe->ppn | pmbe->flags | PMB_V,
			mk_pmb_data(pmbe->entry));
}

static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;
	unsigned long addr_val, data_val;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	addr_val = __raw_readl(addr);
	data_val = __raw_readl(data);

	/* Clear V-bit */
	writel_uncached(addr_val & ~PMB_V, addr);
	writel_uncached(data_val & ~PMB_V, data);
}

static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	spin_unlock_irqrestore(&pmbe->lock, flags);
}

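/*
 * Hardware-supported mapping sizes, largest first, so that pmb_remap()
 * always tries the biggest page size that still fits the request.
 */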
static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size	= SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

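/*
 * Establish a PMB mapping of @size bytes at @vaddr for the physical range
 * starting at @phys, splitting the request across as many entries as
 * needed. Returns the number of bytes actually mapped, or a negative error
 * code on failure.
 */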
long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long wanted;
	int pmb_flags, i;
	long err;
	u64 flags;

	flags = pgprot_val(prot);

	pmb_flags = PMB_WT | PMB_UB;

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		pmb_flags |= PMB_C;

		if ((flags & _PAGE_WT) == 0)
			pmb_flags &= ~(PMB_WT | PMB_UB);
	}

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		unsigned long flags;

		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
				 PMB_NO_ENTRY);
		if (IS_ERR(pmbe)) {
			err = PTR_ERR(pmbe);
			goto out;
		}

		spin_lock_irqsave(&pmbe->lock, flags);

		__set_pmb_entry(pmbe);

		phys	+= pmb_sizes[i].size;
		vaddr	+= pmb_sizes[i].size;
		size	-= pmb_sizes[i].size;

		pmbe->size = pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp)) {
			spin_lock(&pmbp->lock);
			pmbp->link = pmbe;
			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		/*
		 * Instead of trying smaller sizes on every iteration
		 * (even if we succeed in allocating space), try using
		 * pmb_sizes[i].size again.
		 */
		i--;

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	if (size >= SZ_16M)
		goto again;

	return wanted - size;

out:
	pmb_unmap_entry(pmbp, NR_PMB_ENTRIES);

	return err;
}

void pmb_unmap(unsigned long addr)
{
	struct pmb_entry *pmbe = NULL;
	int i;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == addr)
				break;
		}
	}

	read_unlock(&pmb_rwlock);

	pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
}

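/*
 * Illustrative sketch of pmb_remap()/pmb_unmap() usage only; the addresses
 * and pgprot below are made-up values, not taken from any in-tree caller:
 *
 *	long mapped;
 *
 *	mapped = pmb_remap(0xa0000000, 0x40000000, SZ_64M, PAGE_KERNEL);
 *	if (mapped > 0)
 *		pmb_unmap(0xa0000000);
 */
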
static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}

static bool pmb_size_valid(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}

static int pmb_size_to_flags(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return pmb_sizes[i].flag;

	return 0;
}

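/*
 * Tear down up to @depth linked entries starting at @pmbe, clearing the
 * hardware entries and releasing their slots. Callers must hold pmb_rwlock
 * for writing.
 */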
static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	unsigned long flags;

	if (unlikely(!pmbe))
		return;

	write_lock_irqsave(&pmb_rwlock, flags);
	__pmb_unmap_entry(pmbe, depth);
	write_unlock_irqrestore(&pmb_rwlock, flags);
}

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

static void __init pmb_notify(void)
{
	int i;

	pr_info("PMB: boot mappings:\n");

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
	}

	read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		if (pmbp) {
			spin_lock(&pmbp->lock);

			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * setup the entry links accordingly. Compound mappings
			 * are later coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;

			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}

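/*
 * Fold a chain of linked entries into a single, larger mapping rooted at
 * @head, provided the combined span corresponds to a valid PMB page size.
 * The now-redundant tail entries are unmapped and freed.
 */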
static void __init pmb_merge(struct pmb_entry *head)
{
	unsigned long span, newsize;
	struct pmb_entry *tail;
	int i = 1, depth = 0;

	span = newsize = head->size;

	tail = head->link;
	while (tail) {
		span += tail->size;

		if (pmb_size_valid(span)) {
			newsize = span;
			depth = i;
		}

		/* This is the end of the line.. */
		if (!tail->link)
			break;

		tail = tail->link;
		i++;
	}

	/*
	 * The merged page size must be valid.
	 */
	if (!pmb_size_valid(newsize))
		return;

	head->flags &= ~PMB_SZ_MASK;
	head->flags |= pmb_size_to_flags(newsize);

	head->size = newsize;

	__pmb_unmap_entry(head->link, depth);
	__set_pmb_entry(head);
}

static void __init pmb_coalesce(void)
{
	unsigned long flags;
	int i;

	write_lock_irqsave(&pmb_rwlock, flags);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * We're only interested in compound mappings
		 */
		if (!pmbe->link)
			continue;

		/*
		 * Nothing to do if it already uses the largest possible
		 * page size.
		 */
		if (pmbe->size == SZ_512M)
			continue;

		pmb_merge(pmbe);
	}

	write_unlock_irqrestore(&pmb_rwlock, flags);
}

#ifdef CONFIG_UNCACHED_MAPPING
static void __init pmb_resize(void)
{
	int i;

	/*
	 * If the uncached mapping was constructed by the kernel, it will
	 * already be a reasonable size.
	 */
	if (uncached_size == SZ_16M)
		return;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;
		unsigned long flags;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		if (pmbe->vpn != uncached_start)
			continue;

		/*
		 * Found it, now resize it.
		 */
		spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size	= SZ_16M;
		pmbe->flags	&= ~PMB_SZ_MASK;
		pmbe->flags	|= pmb_size_to_flags(pmbe->size);

		uncached_resize(pmbe->size);

		__set_pmb_entry(pmbe);

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	read_unlock(&pmb_rwlock);
}
#endif

void __init pmb_init(void)
{
	/* Synchronize software state */
	pmb_synchronize();

	/* Attempt to combine compound mappings */
	pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
	/* Resize initial mappings, if necessary */
	pmb_resize();
#endif

	/* Log them */
	pmb_notify();

	writel_uncached(0, PMB_IRMCR);

	/* Flush out the TLB */
	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
	ctrl_barrier();
}

bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

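/*
 * debugfs interface for dumping the hardware PMB entries (exposed as "pmb"
 * under the arch debugfs root).
 */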
static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		read_lock(&pmb_rwlock);

		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}

		read_unlock(&pmb_rwlock);
	}

	prev_state = state;

	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif