/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

static void pmb_unmap_entry(struct pmb_entry *);

static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

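/*
 * Each hardware PMB entry is programmed through a pair of memory-mapped
 * array registers; these helpers compute the address array (PMB_ADDR)
 * and data array (PMB_DATA) register addresses for a given entry slot.
 */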
static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
        return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
        return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
        return mk_pmb_entry(entry) | PMB_DATA;
}

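/*
 * Claim the first free slot in the PMB allocation bitmap. Returns the
 * slot index on success, or -ENOSPC once all entries are in use.
 */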
static int pmb_alloc_entry(void)
{
        unsigned int pos;

repeat:
        pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);

        /*
         * find_first_zero_bit() returns NR_PMB_ENTRIES when no zero
         * bit exists, so the bounds check must be >=, not >.
         */
        if (unlikely(pos >= NR_PMB_ENTRIES))
                return -ENOSPC;

        if (test_and_set_bit(pos, pmb_map))
                goto repeat;

        return pos;
}

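/*
 * Allocate and initialize a software PMB entry. PMB_NO_ENTRY lets the
 * allocator pick any free slot; otherwise the specific slot 'entry' is
 * claimed, failing with -ENOSPC if it is already taken.
 */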
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
                                   unsigned long flags, int entry)
{
        struct pmb_entry *pmbe;
        int pos;

        if (entry == PMB_NO_ENTRY) {
                pos = pmb_alloc_entry();
                if (pos < 0)
                        return ERR_PTR(pos);
        } else {
                if (test_and_set_bit(entry, pmb_map))
                        return ERR_PTR(-ENOSPC);
                pos = entry;
        }

        /*
         * Entries live in a static array, so the slot address is
         * always valid once the bitmap position has been claimed.
         */
        pmbe = &pmb_entry_list[pos];

        pmbe->vpn       = vpn;
        pmbe->ppn       = ppn;
        pmbe->flags     = flags;
        pmbe->entry     = pos;

        return pmbe;
}

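/*
 * Return an entry's slot to the allocation bitmap. This only releases
 * the software state; invalidating the hardware entry is done
 * separately via clear_pmb_entry().
 */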
static void pmb_free(struct pmb_entry *pmbe)
{
        int pos = pmbe->entry;

        pmbe->vpn       = 0;
        pmbe->ppn       = 0;
        pmbe->flags     = 0;
        pmbe->entry     = 0;

        clear_bit(pos, pmb_map);
}

/*
 * Must be run uncached.
 */
static void set_pmb_entry(struct pmb_entry *pmbe)
{
        jump_to_uncached();

        __raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));

#ifdef CONFIG_CACHE_WRITETHROUGH
        /*
         * When we are in 32-bit address extended mode, CCR.CB becomes
         * invalid, so care must be taken to manually adjust cacheable
         * translations.
         */
        if (likely(pmbe->flags & PMB_C))
                pmbe->flags |= PMB_WT;
#endif

        __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry));

        back_to_cached();
}

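/*
 * Invalidate a hardware PMB entry by clearing the valid (V) bit in both
 * its address and data array registers. Like set_pmb_entry(), this must
 * run from the uncached mapping.
 */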
static void clear_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned int entry = pmbe->entry;
        unsigned long addr;

        jump_to_uncached();

        /* Clear V-bit */
        addr = mk_pmb_addr(entry);
        __raw_writel(__raw_readl(addr) & ~PMB_V, addr);

        addr = mk_pmb_data(entry);
        __raw_writel(__raw_readl(addr) & ~PMB_V, addr);

        back_to_cached();
}

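/*
 * Supported PMB mapping sizes, ordered largest first so that
 * pmb_remap() always tries the biggest size that still fits the
 * remaining area.
 */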
static struct {
        unsigned long size;
        int flag;
} pmb_sizes[] = {
        { .size = SZ_512M, .flag = PMB_SZ_512M, },
        { .size = SZ_128M, .flag = PMB_SZ_128M, },
        { .size = SZ_64M,  .flag = PMB_SZ_64M,  },
        { .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

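/*
 * Establish PMB mappings for 'size' bytes at vaddr -> phys, greedily
 * covering the request with as few entries as possible. Entries that
 * make up one request are chained through ->link so they can be torn
 * down together. Returns the number of bytes actually mapped, or a
 * negative error code on failure.
 */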
long pmb_remap(unsigned long vaddr, unsigned long phys,
               unsigned long size, pgprot_t prot)
{
        struct pmb_entry *pmbp, *pmbe;
        unsigned long wanted;
        int pmb_flags, i;
        long err;
        u64 flags;

        flags = pgprot_val(prot);

        /* Convert typical pgprot value to the PMB equivalent */
        if (flags & _PAGE_CACHABLE) {
                if (flags & _PAGE_WT)
                        pmb_flags = PMB_WT;
                else
                        pmb_flags = PMB_C;
        } else
                pmb_flags = PMB_WT | PMB_UB;

        pmbp = NULL;
        wanted = size;

again:
        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
                if (size < pmb_sizes[i].size)
                        continue;

                pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
                                 PMB_NO_ENTRY);
                if (IS_ERR(pmbe)) {
                        err = PTR_ERR(pmbe);
                        goto out;
                }

                set_pmb_entry(pmbe);

                phys    += pmb_sizes[i].size;
                vaddr   += pmb_sizes[i].size;
                size    -= pmb_sizes[i].size;

                /*
                 * Link adjacent entries that span multiple PMB entries
                 * for easier tear-down.
                 */
                if (likely(pmbp))
                        pmbp->link = pmbe;

                pmbp = pmbe;

                /*
                 * Instead of trying smaller sizes on every iteration
                 * (even if we succeed in allocating space), try using
                 * pmb_sizes[i].size again.
                 */
                i--;
        }

        /* Loop again while at least SZ_16M, the smallest PMB size, remains. */
        if (size >= SZ_16M)
                goto again;

        return wanted - size;

out:
        pmb_unmap_entry(pmbp);

        return err;
}

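/*
 * Tear down the mapping whose virtual address matches 'addr', along
 * with any chained entries created as part of the same pmb_remap()
 * request.
 */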
void pmb_unmap(unsigned long addr)
{
        struct pmb_entry *pmbe;
        int i;

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                if (test_bit(i, pmb_map)) {
                        pmbe = &pmb_entry_list[i];
                        if (pmbe->vpn == addr) {
                                pmb_unmap_entry(pmbe);
                                break;
                        }
                }
        }
}

static void pmb_unmap_entry(struct pmb_entry *pmbe)
{
        if (unlikely(!pmbe))
                return;

        if (!test_bit(pmbe->entry, pmb_map)) {
                WARN_ON(1);
                return;
        }

        do {
                struct pmb_entry *pmblink = pmbe;

                /*
                 * We may be called before this pmb_entry has been
                 * entered into the PMB table via set_pmb_entry(), but
                 * that's OK because we've allocated a unique slot for
                 * this entry in pmb_alloc() (even if we haven't filled
                 * it yet).
                 *
                 * Therefore, calling clear_pmb_entry() is safe as no
                 * other mapping can be using that slot.
                 */
                clear_pmb_entry(pmbe);

                pmbe = pmblink->link;

                pmb_free(pmblink);
        } while (pmbe);
}

static inline void
pmb_log_mapping(unsigned long data_val, unsigned long vpn, unsigned long ppn)
{
        unsigned int size;
        const char *sz_str;

        size = data_val & PMB_SZ_MASK;

        sz_str = (size == PMB_SZ_16M)  ? " 16MB":
                 (size == PMB_SZ_64M)  ? " 64MB":
                 (size == PMB_SZ_128M) ? "128MB":
                                         "512MB";

        pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n",
                vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str,
                (data_val & PMB_C) ? "" : "un");
}

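/*
 * Only boot mappings whose physical range falls inside
 * [__pa(memory_start), __pa(memory_end)) are worth preserving.
 */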
static inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
        return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

static int pmb_synchronize_mappings(void)
{
        unsigned int applied = 0;
        int i;

        pr_info("PMB: boot mappings:\n");

        /*
         * Run through the initial boot mappings, log the established
         * ones, and blow away anything that falls outside of the valid
         * PPN range. Specifically, we only care about existing mappings
         * that impact the cached/uncached sections.
         *
         * Note that touching these can be a bit of a minefield; the boot
         * loader can establish multi-page mappings with the same caching
         * attributes, so we need to ensure that we aren't modifying a
         * mapping that we're presently executing from, or may execute
         * from in the case of straddling page boundaries.
         *
         * In the future we will have to tidy up after the boot loader by
         * jumping between the cached and uncached mappings and tearing
         * down alternating mappings while executing from the other.
         */
        for (i = 0; i < NR_PMB_ENTRIES; i++) {
                unsigned long addr, data;
                unsigned long addr_val, data_val;
                unsigned long ppn, vpn, flags;
                struct pmb_entry *pmbe;

                addr = mk_pmb_addr(i);
                data = mk_pmb_data(i);

                addr_val = __raw_readl(addr);
                data_val = __raw_readl(data);

                /*
                 * Skip over any bogus entries
                 */
                if (!(data_val & PMB_V) || !(addr_val & PMB_V))
                        continue;

                ppn = data_val & PMB_PFN_MASK;
                vpn = addr_val & PMB_PFN_MASK;

                /*
                 * Only preserve in-range mappings.
                 */
                if (!pmb_ppn_in_range(ppn)) {
                        /*
                         * Invalidate anything out of bounds.
                         */
                        __raw_writel(addr_val & ~PMB_V, addr);
                        __raw_writel(data_val & ~PMB_V, data);
                        continue;
                }

                /*
                 * Update the caching attributes if necessary
                 */
                if (data_val & PMB_C) {
#if defined(CONFIG_CACHE_WRITETHROUGH)
                        data_val |= PMB_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
                        data_val &= ~PMB_WT;
#else
                        data_val &= ~(PMB_C | PMB_WT);
#endif
                        __raw_writel(data_val, data);
                }

                flags = data_val & (PMB_SZ_MASK | PMB_CACHE_MASK);

                pmbe = pmb_alloc(vpn, ppn, flags, i);
                if (IS_ERR(pmbe)) {
                        WARN_ON_ONCE(1);
                        continue;
                }

                pmb_log_mapping(data_val, vpn, ppn);

                applied++;
        }

        return (applied == 0);
}

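/*
 * Early PMB setup: runs uncached, inherits any valid boot mappings via
 * pmb_synchronize_mappings(), and only clears PMB_IRMCR and flushes the
 * TLB when no boot mappings were taken over.
 */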
int pmb_init(void)
{
        int ret;

        jump_to_uncached();

        /*
         * Sync our software copy of the PMB mappings with those in
         * hardware. The mappings in the hardware PMB were either set up
         * by the bootloader or very early on by the kernel.
         */
        ret = pmb_synchronize_mappings();
        if (unlikely(ret == 0)) {
                back_to_cached();
                return 0;
        }

        __raw_writel(0, PMB_IRMCR);

        /* Flush out the TLB */
        __raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);

        back_to_cached();

        return 0;
}

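/*
 * PASCR_SE selects 32-bit address extended (SE) mode; while it reads
 * back clear, the CPU is still running in legacy 29-bit physical
 * addressing mode.
 */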
bool __in_29bit_mode(void)
{
        return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

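/*
 * debugfs interface: dump the current hardware PMB state, one line per
 * entry, through a "pmb" file under the arch's sh_debugfs_root.
 */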
static int pmb_seq_show(struct seq_file *file, void *iter)
{
        int i;

        seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
                         "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
        seq_printf(file, "ety   vpn  ppn  size   flags\n");

        for (i = 0; i < NR_PMB_ENTRIES; i++) {
                unsigned long addr, data;
                unsigned int size;
                char *sz_str = NULL;

                addr = __raw_readl(mk_pmb_addr(i));
                data = __raw_readl(mk_pmb_data(i));

                size = data & PMB_SZ_MASK;
                sz_str = (size == PMB_SZ_16M)  ? " 16MB":
                         (size == PMB_SZ_64M)  ? " 64MB":
                         (size == PMB_SZ_128M) ? "128MB":
                                                 "512MB";

                /* 02: V 0x88 0x08 128MB C CB  B */
                seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
                           i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
                           (addr >> 24) & 0xff, (data >> 24) & 0xff,
                           sz_str, (data & PMB_C) ? 'C' : ' ',
                           (data & PMB_WT) ? "WT" : "CB",
                           (data & PMB_UB) ? "UB" : " B");
        }

        return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
        .owner          = THIS_MODULE,
        .open           = pmb_debugfs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init pmb_debugfs_init(void)
{
        struct dentry *dentry;

        dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
                                     sh_debugfs_root, NULL, &pmb_debugfs_fops);
        if (!dentry)
                return -ENOMEM;
        if (IS_ERR(dentry))
                return PTR_ERR(dentry);

        return 0;
}
postcore_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
        static pm_message_t prev_state;
        int i;

        /* Restore the PMB after a resume from hibernation */
        if (state.event == PM_EVENT_ON &&
            prev_state.event == PM_EVENT_FREEZE) {
                struct pmb_entry *pmbe;
                for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                        if (test_bit(i, pmb_map)) {
                                pmbe = &pmb_entry_list[i];
                                set_pmb_entry(pmbe);
                        }
                }
        }
        prev_state = state;
        return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
        return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
        .suspend = pmb_sysdev_suspend,
        .resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
        return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif